]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-2.6.32.59-201203181400.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-2.6.32.59-201203181400.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index e1efc40..4e87324 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -1,15 +1,20 @@
6 *.a
7 *.aux
8 *.bin
9 +*.c.[012].*
10 +*.cis
11 *.cpio
12 *.csp
13 +*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 *.eps
18 *.fw
19 +*.gcno
20 *.gen.S
21 *.gif
22 +*.gmo
23 *.grep
24 *.grp
25 *.gz
26 @@ -38,8 +43,10 @@
27 *.tab.h
28 *.tex
29 *.ver
30 +*.vim
31 *.xml
32 *_MODULES
33 +*_reg_safe.h
34 *_vga16.c
35 *~
36 *.9
37 @@ -49,11 +56,16 @@
38 53c700_d.h
39 CVS
40 ChangeSet
41 +GPATH
42 +GRTAGS
43 +GSYMS
44 +GTAGS
45 Image
46 Kerntypes
47 Module.markers
48 Module.symvers
49 PENDING
50 +PERF*
51 SCCS
52 System.map*
53 TAGS
54 @@ -76,7 +88,11 @@ btfixupprep
55 build
56 bvmlinux
57 bzImage*
58 +capability_names.h
59 +capflags.c
60 classlist.h*
61 +clut_vga16.c
62 +common-cmds.h
63 comp*.log
64 compile.h*
65 conf
66 @@ -84,6 +100,8 @@ config
67 config-*
68 config_data.h*
69 config_data.gz*
70 +config.c
71 +config.tmp
72 conmakehash
73 consolemap_deftbl.c*
74 cpustr.h
75 @@ -97,19 +115,23 @@ elfconfig.h*
76 fixdep
77 fore200e_mkfirm
78 fore200e_pca_fw.c*
79 +gate.lds
80 gconf
81 gen-devlist
82 gen_crc32table
83 gen_init_cpio
84 genksyms
85 *_gray256.c
86 +hash
87 +hid-example
88 ihex2fw
89 ikconfig.h*
90 initramfs_data.cpio
91 +initramfs_data.cpio.bz2
92 initramfs_data.cpio.gz
93 initramfs_list
94 kallsyms
95 -kconfig
96 +kern_constants.h
97 keywords.c
98 ksym.c*
99 ksym.h*
100 @@ -117,6 +139,7 @@ kxgettext
101 lkc_defs.h
102 lex.c
103 lex.*.c
104 +lib1funcs.S
105 logo_*.c
106 logo_*_clut224.c
107 logo_*_mono.c
108 @@ -127,13 +150,16 @@ machtypes.h
109 map
110 maui_boot.h
111 mconf
112 +mdp
113 miboot*
114 mk_elfconfig
115 mkboot
116 mkbugboot
117 mkcpustr
118 mkdep
119 +mkpiggy
120 mkprep
121 +mkregtable
122 mktables
123 mktree
124 modpost
125 @@ -149,6 +175,7 @@ patches*
126 pca200e.bin
127 pca200e_ecd.bin2
128 piggy.gz
129 +piggy.S
130 piggyback
131 pnmtologo
132 ppc_defs.h*
133 @@ -157,12 +184,15 @@ qconf
134 raid6altivec*.c
135 raid6int*.c
136 raid6tables.c
137 +regdb.c
138 relocs
139 +rlim_names.h
140 series
141 setup
142 setup.bin
143 setup.elf
144 sImage
145 +slabinfo
146 sm_tbl*
147 split-include
148 syscalltab.h
149 @@ -171,6 +201,7 @@ tftpboot.img
150 timeconst.h
151 times.h*
152 trix_boot.h
153 +user_constants.h
154 utsrelease.h*
155 vdso-syms.lds
156 vdso.lds
157 @@ -186,14 +217,20 @@ version.h*
158 vmlinux
159 vmlinux-*
160 vmlinux.aout
161 +vmlinux.bin.all
162 +vmlinux.bin.bz2
163 vmlinux.lds
164 +vmlinux.relocs
165 +voffset.h
166 vsyscall.lds
167 vsyscall_32.lds
168 wanxlfw.inc
169 uImage
170 unifdef
171 +utsrelease.h
172 wakeup.bin
173 wakeup.elf
174 wakeup.lds
175 zImage*
176 zconf.hash.c
177 +zoffset.h
178 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
179 index c840e7d..f4c451c 100644
180 --- a/Documentation/kernel-parameters.txt
181 +++ b/Documentation/kernel-parameters.txt
182 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
183 the specified number of seconds. This is to be used if
184 your oopses keep scrolling off the screen.
185
186 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
187 + virtualization environments that don't cope well with the
188 + expand down segment used by UDEREF on X86-32 or the frequent
189 + page table updates on X86-64.
190 +
191 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
192 +
193 pcbit= [HW,ISDN]
194
195 pcd. [PARIDE]
196 diff --git a/MAINTAINERS b/MAINTAINERS
197 index 613da5d..4fe3eda 100644
198 --- a/MAINTAINERS
199 +++ b/MAINTAINERS
200 @@ -5725,6 +5725,14 @@ L: netdev@vger.kernel.org
201 S: Maintained
202 F: drivers/net/vmxnet3/
203
204 +VMware PVSCSI driver
205 +M: Alok Kataria <akataria@vmware.com>
206 +M: VMware PV-Drivers <pv-drivers@vmware.com>
207 +L: linux-scsi@vger.kernel.org
208 +S: Maintained
209 +F: drivers/scsi/vmw_pvscsi.c
210 +F: drivers/scsi/vmw_pvscsi.h
211 +
212 VOLTAGE AND CURRENT REGULATOR FRAMEWORK
213 M: Liam Girdwood <lrg@slimlogic.co.uk>
214 M: Mark Brown <broonie@opensource.wolfsonmicro.com>
215 diff --git a/Makefile b/Makefile
216 index 3a9a721..e5a22f7 100644
217 --- a/Makefile
218 +++ b/Makefile
219 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
220
221 HOSTCC = gcc
222 HOSTCXX = g++
223 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
224 -HOSTCXXFLAGS = -O2
225 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
226 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
227 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
228
229 # Decide whether to build built-in, modular, or both.
230 # Normally, just do built-in.
231 @@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
232 # Rules shared between *config targets and build targets
233
234 # Basic helpers built in scripts/
235 -PHONY += scripts_basic
236 -scripts_basic:
237 +PHONY += scripts_basic gcc-plugins
238 +scripts_basic: gcc-plugins
239 $(Q)$(MAKE) $(build)=scripts/basic
240
241 # To avoid any implicit rule to kick in, define an empty command.
242 @@ -403,7 +404,7 @@ endif
243 # of make so .config is not included in this case either (for *config).
244
245 no-dot-config-targets := clean mrproper distclean \
246 - cscope TAGS tags help %docs check% \
247 + cscope gtags TAGS tags help %docs check% \
248 include/linux/version.h headers_% \
249 kernelrelease kernelversion
250
251 @@ -526,6 +527,53 @@ else
252 KBUILD_CFLAGS += -O2
253 endif
254
255 +ifndef DISABLE_PAX_PLUGINS
256 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
257 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
258 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
259 +endif
260 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
261 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
262 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
263 +endif
264 +ifdef CONFIG_KALLOCSTAT_PLUGIN
265 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
266 +endif
267 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
268 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
269 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
270 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
271 +endif
272 +ifdef CONFIG_CHECKER_PLUGIN
273 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
274 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
275 +endif
276 +endif
277 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
278 +ifdef CONFIG_PAX_SIZE_OVERFLOW
279 +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
280 +endif
281 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
282 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN_CFLAGS)
283 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
284 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
285 +ifeq ($(KBUILD_EXTMOD),)
286 +gcc-plugins:
287 + $(Q)$(MAKE) $(build)=tools/gcc
288 +else
289 +gcc-plugins: ;
290 +endif
291 +else
292 +gcc-plugins:
293 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
294 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
295 +else
296 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
297 +endif
298 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
299 +endif
300 +endif
301 +
302 include $(srctree)/arch/$(SRCARCH)/Makefile
303
304 ifneq ($(CONFIG_FRAME_WARN),0)
305 @@ -647,7 +695,7 @@ export mod_strip_cmd
306
307
308 ifeq ($(KBUILD_EXTMOD),)
309 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
310 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
311
312 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
313 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
314 @@ -868,6 +916,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
315
316 # The actual objects are generated when descending,
317 # make sure no implicit rule kicks in
318 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
319 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
320 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
321
322 # Handle descending into subdirectories listed in $(vmlinux-dirs)
323 @@ -877,7 +927,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
324 # Error messages still appears in the original language
325
326 PHONY += $(vmlinux-dirs)
327 -$(vmlinux-dirs): prepare scripts
328 +$(vmlinux-dirs): gcc-plugins prepare scripts
329 $(Q)$(MAKE) $(build)=$@
330
331 # Build the kernel release string
332 @@ -986,6 +1036,7 @@ prepare0: archprepare FORCE
333 $(Q)$(MAKE) $(build)=. missing-syscalls
334
335 # All the preparing..
336 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
337 prepare: prepare0
338
339 # The asm symlink changes when $(ARCH) changes.
340 @@ -1127,6 +1178,8 @@ all: modules
341 # using awk while concatenating to the final file.
342
343 PHONY += modules
344 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
345 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
346 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
347 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
348 @$(kecho) ' Building modules, stage 2.';
349 @@ -1136,7 +1189,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
350
351 # Target to prepare building external modules
352 PHONY += modules_prepare
353 -modules_prepare: prepare scripts
354 +modules_prepare: gcc-plugins prepare scripts
355
356 # Target to install modules
357 PHONY += modules_install
358 @@ -1201,7 +1254,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
359 include/linux/autoconf.h include/linux/version.h \
360 include/linux/utsrelease.h \
361 include/linux/bounds.h include/asm*/asm-offsets.h \
362 - Module.symvers Module.markers tags TAGS cscope*
363 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
364
365 # clean - Delete most, but leave enough to build external modules
366 #
367 @@ -1245,7 +1298,7 @@ distclean: mrproper
368 @find $(srctree) $(RCS_FIND_IGNORE) \
369 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
370 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
371 - -o -name '.*.rej' -o -size 0 \
372 + -o -name '.*.rej' -o -name '*.so' -o -size 0 \
373 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
374 -type f -print | xargs rm -f
375
376 @@ -1292,6 +1345,7 @@ help:
377 @echo ' modules_prepare - Set up for building external modules'
378 @echo ' tags/TAGS - Generate tags file for editors'
379 @echo ' cscope - Generate cscope index'
380 + @echo ' gtags - Generate GNU GLOBAL index'
381 @echo ' kernelrelease - Output the release version string'
382 @echo ' kernelversion - Output the version stored in Makefile'
383 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
384 @@ -1393,6 +1447,8 @@ PHONY += $(module-dirs) modules
385 $(module-dirs): crmodverdir $(objtree)/Module.symvers
386 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
387
388 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
389 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
390 modules: $(module-dirs)
391 @$(kecho) ' Building modules, stage 2.';
392 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
393 @@ -1448,7 +1504,7 @@ endif # KBUILD_EXTMOD
394 quiet_cmd_tags = GEN $@
395 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
396
397 -tags TAGS cscope: FORCE
398 +tags TAGS cscope gtags: FORCE
399 $(call cmd,tags)
400
401 # Scripts to check various things for consistency
402 @@ -1513,17 +1569,21 @@ else
403 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
404 endif
405
406 -%.s: %.c prepare scripts FORCE
407 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
408 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
409 +%.s: %.c gcc-plugins prepare scripts FORCE
410 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
411 %.i: %.c prepare scripts FORCE
412 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
413 -%.o: %.c prepare scripts FORCE
414 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
415 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
416 +%.o: %.c gcc-plugins prepare scripts FORCE
417 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
418 %.lst: %.c prepare scripts FORCE
419 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
420 -%.s: %.S prepare scripts FORCE
421 +%.s: %.S gcc-plugins prepare scripts FORCE
422 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
423 -%.o: %.S prepare scripts FORCE
424 +%.o: %.S gcc-plugins prepare scripts FORCE
425 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
426 %.symtypes: %.c prepare scripts FORCE
427 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
428 @@ -1533,11 +1593,15 @@ endif
429 $(cmd_crmodverdir)
430 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
431 $(build)=$(build-dir)
432 -%/: prepare scripts FORCE
433 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
434 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
435 +%/: gcc-plugins prepare scripts FORCE
436 $(cmd_crmodverdir)
437 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
438 $(build)=$(build-dir)
439 -%.ko: prepare scripts FORCE
440 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
441 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
442 +%.ko: gcc-plugins prepare scripts FORCE
443 $(cmd_crmodverdir)
444 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
445 $(build)=$(build-dir) $(@:.ko=.o)
446 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
447 index 610dff4..f396854 100644
448 --- a/arch/alpha/include/asm/atomic.h
449 +++ b/arch/alpha/include/asm/atomic.h
450 @@ -251,6 +251,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
451 #define atomic_dec(v) atomic_sub(1,(v))
452 #define atomic64_dec(v) atomic64_sub(1,(v))
453
454 +#define atomic64_read_unchecked(v) atomic64_read(v)
455 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
456 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
457 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
458 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
459 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
460 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
461 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
462 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
463 +
464 #define smp_mb__before_atomic_dec() smp_mb()
465 #define smp_mb__after_atomic_dec() smp_mb()
466 #define smp_mb__before_atomic_inc() smp_mb()
467 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
468 index f199e69..af005f5 100644
469 --- a/arch/alpha/include/asm/cache.h
470 +++ b/arch/alpha/include/asm/cache.h
471 @@ -4,19 +4,20 @@
472 #ifndef __ARCH_ALPHA_CACHE_H
473 #define __ARCH_ALPHA_CACHE_H
474
475 +#include <linux/const.h>
476
477 /* Bytes per L1 (data) cache line. */
478 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
479 -# define L1_CACHE_BYTES 64
480 # define L1_CACHE_SHIFT 6
481 #else
482 /* Both EV4 and EV5 are write-through, read-allocate,
483 direct-mapped, physical.
484 */
485 -# define L1_CACHE_BYTES 32
486 # define L1_CACHE_SHIFT 5
487 #endif
488
489 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
490 +
491 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
492 #define SMP_CACHE_BYTES L1_CACHE_BYTES
493
494 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
495 index 5c75c1b..c82f878 100644
496 --- a/arch/alpha/include/asm/elf.h
497 +++ b/arch/alpha/include/asm/elf.h
498 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
499
500 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
501
502 +#ifdef CONFIG_PAX_ASLR
503 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
504 +
505 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
506 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
507 +#endif
508 +
509 /* $0 is set by ld.so to a pointer to a function which might be
510 registered using atexit. This provides a mean for the dynamic
511 linker to call DT_FINI functions for shared libraries that have
512 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
513 index 3f0c59f..cf1e100 100644
514 --- a/arch/alpha/include/asm/pgtable.h
515 +++ b/arch/alpha/include/asm/pgtable.h
516 @@ -101,6 +101,17 @@ struct vm_area_struct;
517 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
518 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
519 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
520 +
521 +#ifdef CONFIG_PAX_PAGEEXEC
522 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
523 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
524 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
525 +#else
526 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
527 +# define PAGE_COPY_NOEXEC PAGE_COPY
528 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
529 +#endif
530 +
531 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
532
533 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
534 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
535 index ebc3c89..20cfa63 100644
536 --- a/arch/alpha/kernel/module.c
537 +++ b/arch/alpha/kernel/module.c
538 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
539
540 /* The small sections were sorted to the end of the segment.
541 The following should definitely cover them. */
542 - gp = (u64)me->module_core + me->core_size - 0x8000;
543 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
544 got = sechdrs[me->arch.gotsecindex].sh_addr;
545
546 for (i = 0; i < n; i++) {
547 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
548 index a94e49c..d71dd44 100644
549 --- a/arch/alpha/kernel/osf_sys.c
550 +++ b/arch/alpha/kernel/osf_sys.c
551 @@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
552 /* At this point: (!vma || addr < vma->vm_end). */
553 if (limit - len < addr)
554 return -ENOMEM;
555 - if (!vma || addr + len <= vma->vm_start)
556 + if (check_heap_stack_gap(vma, addr, len))
557 return addr;
558 addr = vma->vm_end;
559 vma = vma->vm_next;
560 @@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
561 merely specific addresses, but regions of memory -- perhaps
562 this feature should be incorporated into all ports? */
563
564 +#ifdef CONFIG_PAX_RANDMMAP
565 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
566 +#endif
567 +
568 if (addr) {
569 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
570 if (addr != (unsigned long) -ENOMEM)
571 @@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
572 }
573
574 /* Next, try allocating at TASK_UNMAPPED_BASE. */
575 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
576 - len, limit);
577 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
578 +
579 if (addr != (unsigned long) -ENOMEM)
580 return addr;
581
582 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
583 index 00a31de..2ded0f2 100644
584 --- a/arch/alpha/mm/fault.c
585 +++ b/arch/alpha/mm/fault.c
586 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
587 __reload_thread(pcb);
588 }
589
590 +#ifdef CONFIG_PAX_PAGEEXEC
591 +/*
592 + * PaX: decide what to do with offenders (regs->pc = fault address)
593 + *
594 + * returns 1 when task should be killed
595 + * 2 when patched PLT trampoline was detected
596 + * 3 when unpatched PLT trampoline was detected
597 + */
598 +static int pax_handle_fetch_fault(struct pt_regs *regs)
599 +{
600 +
601 +#ifdef CONFIG_PAX_EMUPLT
602 + int err;
603 +
604 + do { /* PaX: patched PLT emulation #1 */
605 + unsigned int ldah, ldq, jmp;
606 +
607 + err = get_user(ldah, (unsigned int *)regs->pc);
608 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
609 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
610 +
611 + if (err)
612 + break;
613 +
614 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
615 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
616 + jmp == 0x6BFB0000U)
617 + {
618 + unsigned long r27, addr;
619 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
620 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
621 +
622 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
623 + err = get_user(r27, (unsigned long *)addr);
624 + if (err)
625 + break;
626 +
627 + regs->r27 = r27;
628 + regs->pc = r27;
629 + return 2;
630 + }
631 + } while (0);
632 +
633 + do { /* PaX: patched PLT emulation #2 */
634 + unsigned int ldah, lda, br;
635 +
636 + err = get_user(ldah, (unsigned int *)regs->pc);
637 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
638 + err |= get_user(br, (unsigned int *)(regs->pc+8));
639 +
640 + if (err)
641 + break;
642 +
643 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
644 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
645 + (br & 0xFFE00000U) == 0xC3E00000U)
646 + {
647 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
648 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
649 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
650 +
651 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
652 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
653 + return 2;
654 + }
655 + } while (0);
656 +
657 + do { /* PaX: unpatched PLT emulation */
658 + unsigned int br;
659 +
660 + err = get_user(br, (unsigned int *)regs->pc);
661 +
662 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
663 + unsigned int br2, ldq, nop, jmp;
664 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
665 +
666 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
667 + err = get_user(br2, (unsigned int *)addr);
668 + err |= get_user(ldq, (unsigned int *)(addr+4));
669 + err |= get_user(nop, (unsigned int *)(addr+8));
670 + err |= get_user(jmp, (unsigned int *)(addr+12));
671 + err |= get_user(resolver, (unsigned long *)(addr+16));
672 +
673 + if (err)
674 + break;
675 +
676 + if (br2 == 0xC3600000U &&
677 + ldq == 0xA77B000CU &&
678 + nop == 0x47FF041FU &&
679 + jmp == 0x6B7B0000U)
680 + {
681 + regs->r28 = regs->pc+4;
682 + regs->r27 = addr+16;
683 + regs->pc = resolver;
684 + return 3;
685 + }
686 + }
687 + } while (0);
688 +#endif
689 +
690 + return 1;
691 +}
692 +
693 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
694 +{
695 + unsigned long i;
696 +
697 + printk(KERN_ERR "PAX: bytes at PC: ");
698 + for (i = 0; i < 5; i++) {
699 + unsigned int c;
700 + if (get_user(c, (unsigned int *)pc+i))
701 + printk(KERN_CONT "???????? ");
702 + else
703 + printk(KERN_CONT "%08x ", c);
704 + }
705 + printk("\n");
706 +}
707 +#endif
708
709 /*
710 * This routine handles page faults. It determines the address,
711 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
712 good_area:
713 si_code = SEGV_ACCERR;
714 if (cause < 0) {
715 - if (!(vma->vm_flags & VM_EXEC))
716 + if (!(vma->vm_flags & VM_EXEC)) {
717 +
718 +#ifdef CONFIG_PAX_PAGEEXEC
719 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
720 + goto bad_area;
721 +
722 + up_read(&mm->mmap_sem);
723 + switch (pax_handle_fetch_fault(regs)) {
724 +
725 +#ifdef CONFIG_PAX_EMUPLT
726 + case 2:
727 + case 3:
728 + return;
729 +#endif
730 +
731 + }
732 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
733 + do_group_exit(SIGKILL);
734 +#else
735 goto bad_area;
736 +#endif
737 +
738 + }
739 } else if (!cause) {
740 /* Allow reads even for write-only mappings */
741 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
742 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
743 index b68faef..6dd1496 100644
744 --- a/arch/arm/Kconfig
745 +++ b/arch/arm/Kconfig
746 @@ -14,6 +14,7 @@ config ARM
747 select SYS_SUPPORTS_APM_EMULATION
748 select HAVE_OPROFILE
749 select HAVE_ARCH_KGDB
750 + select GENERIC_ATOMIC64
751 select HAVE_KPROBES if (!XIP_KERNEL)
752 select HAVE_KRETPROBES if (HAVE_KPROBES)
753 select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
754 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
755 index d0daeab..ca7e10e 100644
756 --- a/arch/arm/include/asm/atomic.h
757 +++ b/arch/arm/include/asm/atomic.h
758 @@ -15,6 +15,10 @@
759 #include <linux/types.h>
760 #include <asm/system.h>
761
762 +#ifdef CONFIG_GENERIC_ATOMIC64
763 +#include <asm-generic/atomic64.h>
764 +#endif
765 +
766 #define ATOMIC_INIT(i) { (i) }
767
768 #ifdef __KERNEL__
769 @@ -24,8 +28,16 @@
770 * strex/ldrex monitor on some implementations. The reason we can use it for
771 * atomic_set() is the clrex or dummy strex done on every exception return.
772 */
773 -#define atomic_read(v) ((v)->counter)
774 +#define atomic_read(v) (*(volatile int *)&(v)->counter)
775 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
776 +{
777 + return v->counter;
778 +}
779 #define atomic_set(v,i) (((v)->counter) = (i))
780 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
781 +{
782 + v->counter = i;
783 +}
784
785 #if __LINUX_ARM_ARCH__ >= 6
786
787 @@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
788 int result;
789
790 __asm__ __volatile__("@ atomic_add\n"
791 +"1: ldrex %1, [%2]\n"
792 +" add %0, %1, %3\n"
793 +
794 +#ifdef CONFIG_PAX_REFCOUNT
795 +" bvc 3f\n"
796 +"2: bkpt 0xf103\n"
797 +"3:\n"
798 +#endif
799 +
800 +" strex %1, %0, [%2]\n"
801 +" teq %1, #0\n"
802 +" bne 1b"
803 +
804 +#ifdef CONFIG_PAX_REFCOUNT
805 +"\n4:\n"
806 + _ASM_EXTABLE(2b, 4b)
807 +#endif
808 +
809 + : "=&r" (result), "=&r" (tmp)
810 + : "r" (&v->counter), "Ir" (i)
811 + : "cc");
812 +}
813 +
814 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
815 +{
816 + unsigned long tmp;
817 + int result;
818 +
819 + __asm__ __volatile__("@ atomic_add_unchecked\n"
820 "1: ldrex %0, [%2]\n"
821 " add %0, %0, %3\n"
822 " strex %1, %0, [%2]\n"
823 @@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
824 smp_mb();
825
826 __asm__ __volatile__("@ atomic_add_return\n"
827 +"1: ldrex %1, [%2]\n"
828 +" add %0, %1, %3\n"
829 +
830 +#ifdef CONFIG_PAX_REFCOUNT
831 +" bvc 3f\n"
832 +" mov %0, %1\n"
833 +"2: bkpt 0xf103\n"
834 +"3:\n"
835 +#endif
836 +
837 +" strex %1, %0, [%2]\n"
838 +" teq %1, #0\n"
839 +" bne 1b"
840 +
841 +#ifdef CONFIG_PAX_REFCOUNT
842 +"\n4:\n"
843 + _ASM_EXTABLE(2b, 4b)
844 +#endif
845 +
846 + : "=&r" (result), "=&r" (tmp)
847 + : "r" (&v->counter), "Ir" (i)
848 + : "cc");
849 +
850 + smp_mb();
851 +
852 + return result;
853 +}
854 +
855 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
856 +{
857 + unsigned long tmp;
858 + int result;
859 +
860 + smp_mb();
861 +
862 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
863 "1: ldrex %0, [%2]\n"
864 " add %0, %0, %3\n"
865 " strex %1, %0, [%2]\n"
866 @@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
867 int result;
868
869 __asm__ __volatile__("@ atomic_sub\n"
870 +"1: ldrex %1, [%2]\n"
871 +" sub %0, %1, %3\n"
872 +
873 +#ifdef CONFIG_PAX_REFCOUNT
874 +" bvc 3f\n"
875 +"2: bkpt 0xf103\n"
876 +"3:\n"
877 +#endif
878 +
879 +" strex %1, %0, [%2]\n"
880 +" teq %1, #0\n"
881 +" bne 1b"
882 +
883 +#ifdef CONFIG_PAX_REFCOUNT
884 +"\n4:\n"
885 + _ASM_EXTABLE(2b, 4b)
886 +#endif
887 +
888 + : "=&r" (result), "=&r" (tmp)
889 + : "r" (&v->counter), "Ir" (i)
890 + : "cc");
891 +}
892 +
893 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
894 +{
895 + unsigned long tmp;
896 + int result;
897 +
898 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
899 "1: ldrex %0, [%2]\n"
900 " sub %0, %0, %3\n"
901 " strex %1, %0, [%2]\n"
902 @@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
903 smp_mb();
904
905 __asm__ __volatile__("@ atomic_sub_return\n"
906 -"1: ldrex %0, [%2]\n"
907 -" sub %0, %0, %3\n"
908 +"1: ldrex %1, [%2]\n"
909 +" sub %0, %1, %3\n"
910 +
911 +#ifdef CONFIG_PAX_REFCOUNT
912 +" bvc 3f\n"
913 +" mov %0, %1\n"
914 +"2: bkpt 0xf103\n"
915 +"3:\n"
916 +#endif
917 +
918 " strex %1, %0, [%2]\n"
919 " teq %1, #0\n"
920 " bne 1b"
921 +
922 +#ifdef CONFIG_PAX_REFCOUNT
923 +"\n4:\n"
924 + _ASM_EXTABLE(2b, 4b)
925 +#endif
926 +
927 : "=&r" (result), "=&r" (tmp)
928 : "r" (&v->counter), "Ir" (i)
929 : "cc");
930 @@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
931 return oldval;
932 }
933
934 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
935 +{
936 + unsigned long oldval, res;
937 +
938 + smp_mb();
939 +
940 + do {
941 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
942 + "ldrex %1, [%2]\n"
943 + "mov %0, #0\n"
944 + "teq %1, %3\n"
945 + "strexeq %0, %4, [%2]\n"
946 + : "=&r" (res), "=&r" (oldval)
947 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
948 + : "cc");
949 + } while (res);
950 +
951 + smp_mb();
952 +
953 + return oldval;
954 +}
955 +
956 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
957 {
958 unsigned long tmp, tmp2;
959 @@ -207,6 +349,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
960 #endif /* __LINUX_ARM_ARCH__ */
961
962 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
963 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
964 +{
965 + return xchg(&v->counter, new);
966 +}
967
968 static inline int atomic_add_unless(atomic_t *v, int a, int u)
969 {
970 @@ -220,11 +366,27 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
971 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
972
973 #define atomic_inc(v) atomic_add(1, v)
974 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
975 +{
976 + atomic_add_unchecked(1, v);
977 +}
978 #define atomic_dec(v) atomic_sub(1, v)
979 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
980 +{
981 + atomic_sub_unchecked(1, v);
982 +}
983
984 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
985 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
986 +{
987 + return atomic_add_return_unchecked(1, v) == 0;
988 +}
989 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
990 #define atomic_inc_return(v) (atomic_add_return(1, v))
991 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
992 +{
993 + return atomic_add_return_unchecked(1, v);
994 +}
995 #define atomic_dec_return(v) (atomic_sub_return(1, v))
996 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
997
998 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
999 index 66c160b..bca1449 100644
1000 --- a/arch/arm/include/asm/cache.h
1001 +++ b/arch/arm/include/asm/cache.h
1002 @@ -4,8 +4,10 @@
1003 #ifndef __ASMARM_CACHE_H
1004 #define __ASMARM_CACHE_H
1005
1006 +#include <linux/const.h>
1007 +
1008 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1009 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1010 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1011
1012 /*
1013 * Memory returned by kmalloc() may be used for DMA, so we must make
1014 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1015 index 3d0cdd2..19957c5 100644
1016 --- a/arch/arm/include/asm/cacheflush.h
1017 +++ b/arch/arm/include/asm/cacheflush.h
1018 @@ -216,13 +216,13 @@ struct cpu_cache_fns {
1019 void (*dma_inv_range)(const void *, const void *);
1020 void (*dma_clean_range)(const void *, const void *);
1021 void (*dma_flush_range)(const void *, const void *);
1022 -};
1023 +} __no_const;
1024
1025 struct outer_cache_fns {
1026 void (*inv_range)(unsigned long, unsigned long);
1027 void (*clean_range)(unsigned long, unsigned long);
1028 void (*flush_range)(unsigned long, unsigned long);
1029 -};
1030 +} __no_const;
1031
1032 /*
1033 * Select the calling method
1034 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1035 index 6aac3f5..265536b 100644
1036 --- a/arch/arm/include/asm/elf.h
1037 +++ b/arch/arm/include/asm/elf.h
1038 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1039 the loader. We need to make sure that it is out of the way of the program
1040 that it will "exec", and that there is sufficient room for the brk. */
1041
1042 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1043 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1044 +
1045 +#ifdef CONFIG_PAX_ASLR
1046 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1047 +
1048 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1049 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1050 +#endif
1051
1052 /* When the program starts, a1 contains a pointer to a function to be
1053 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1054 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1055 index c019949..388fdd1 100644
1056 --- a/arch/arm/include/asm/kmap_types.h
1057 +++ b/arch/arm/include/asm/kmap_types.h
1058 @@ -19,6 +19,7 @@ enum km_type {
1059 KM_SOFTIRQ0,
1060 KM_SOFTIRQ1,
1061 KM_L2_CACHE,
1062 + KM_CLEARPAGE,
1063 KM_TYPE_NR
1064 };
1065
1066 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1067 index 3a32af4..c8def8a 100644
1068 --- a/arch/arm/include/asm/page.h
1069 +++ b/arch/arm/include/asm/page.h
1070 @@ -122,7 +122,7 @@ struct cpu_user_fns {
1071 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1072 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1073 unsigned long vaddr);
1074 -};
1075 +} __no_const;
1076
1077 #ifdef MULTI_USER
1078 extern struct cpu_user_fns cpu_user;
1079 diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
1080 index d65b2f5..9d87555 100644
1081 --- a/arch/arm/include/asm/system.h
1082 +++ b/arch/arm/include/asm/system.h
1083 @@ -86,6 +86,8 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
1084
1085 #define xchg(ptr,x) \
1086 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1087 +#define xchg_unchecked(ptr,x) \
1088 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1089
1090 extern asmlinkage void __backtrace(void);
1091 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
1092 @@ -98,7 +100,7 @@ extern int cpu_architecture(void);
1093 extern void cpu_init(void);
1094
1095 void arm_machine_restart(char mode, const char *cmd);
1096 -extern void (*arm_pm_restart)(char str, const char *cmd);
1097 +extern void (*arm_pm_restart)(char str, const char *cmd) __noreturn;
1098
1099 #define UDBG_UNDEFINED (1 << 0)
1100 #define UDBG_SYSCALL (1 << 1)
1101 @@ -505,6 +507,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
1102
1103 #endif /* __LINUX_ARM_ARCH__ >= 6 */
1104
1105 +#define _ASM_EXTABLE(from, to) \
1106 +" .pushsection __ex_table,\"a\"\n"\
1107 +" .align 3\n" \
1108 +" .long " #from ", " #to"\n" \
1109 +" .popsection"
1110 +
1111 +
1112 #endif /* __ASSEMBLY__ */
1113
1114 #define arch_align_stack(x) (x)
1115 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1116 index 1d6bd40..fba0cb9 100644
1117 --- a/arch/arm/include/asm/uaccess.h
1118 +++ b/arch/arm/include/asm/uaccess.h
1119 @@ -22,6 +22,8 @@
1120 #define VERIFY_READ 0
1121 #define VERIFY_WRITE 1
1122
1123 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1124 +
1125 /*
1126 * The exception table consists of pairs of addresses: the first is the
1127 * address of an instruction that is allowed to fault, and the second is
1128 @@ -387,8 +389,23 @@ do { \
1129
1130
1131 #ifdef CONFIG_MMU
1132 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1133 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1134 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1135 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1136 +
1137 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1138 +{
1139 + if (!__builtin_constant_p(n))
1140 + check_object_size(to, n, false);
1141 + return ___copy_from_user(to, from, n);
1142 +}
1143 +
1144 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1145 +{
1146 + if (!__builtin_constant_p(n))
1147 + check_object_size(from, n, true);
1148 + return ___copy_to_user(to, from, n);
1149 +}
1150 +
1151 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1152 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1153 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1154 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1155
1156 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1157 {
1158 + if ((long)n < 0)
1159 + return n;
1160 +
1161 if (access_ok(VERIFY_READ, from, n))
1162 n = __copy_from_user(to, from, n);
1163 else /* security hole - plug it */
1164 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1165
1166 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1167 {
1168 + if ((long)n < 0)
1169 + return n;
1170 +
1171 if (access_ok(VERIFY_WRITE, to, n))
1172 n = __copy_to_user(to, from, n);
1173 return n;
1174 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1175 index 0e62770..e2c2cd6 100644
1176 --- a/arch/arm/kernel/armksyms.c
1177 +++ b/arch/arm/kernel/armksyms.c
1178 @@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1179 #ifdef CONFIG_MMU
1180 EXPORT_SYMBOL(copy_page);
1181
1182 -EXPORT_SYMBOL(__copy_from_user);
1183 -EXPORT_SYMBOL(__copy_to_user);
1184 +EXPORT_SYMBOL(___copy_from_user);
1185 +EXPORT_SYMBOL(___copy_to_user);
1186 EXPORT_SYMBOL(__clear_user);
1187
1188 EXPORT_SYMBOL(__get_user_1);
1189 diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
1190 index ba8ccfe..2dc34dc 100644
1191 --- a/arch/arm/kernel/kgdb.c
1192 +++ b/arch/arm/kernel/kgdb.c
1193 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
1194 * and we handle the normal undef case within the do_undefinstr
1195 * handler.
1196 */
1197 -struct kgdb_arch arch_kgdb_ops = {
1198 +const struct kgdb_arch arch_kgdb_ops = {
1199 #ifndef __ARMEB__
1200 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
1201 #else /* ! __ARMEB__ */
1202 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1203 index 61f90d3..771ab27 100644
1204 --- a/arch/arm/kernel/process.c
1205 +++ b/arch/arm/kernel/process.c
1206 @@ -83,7 +83,7 @@ static int __init hlt_setup(char *__unused)
1207 __setup("nohlt", nohlt_setup);
1208 __setup("hlt", hlt_setup);
1209
1210 -void arm_machine_restart(char mode, const char *cmd)
1211 +__noreturn void arm_machine_restart(char mode, const char *cmd)
1212 {
1213 /*
1214 * Clean and disable cache, and turn off interrupts
1215 @@ -117,7 +117,7 @@ void arm_machine_restart(char mode, const char *cmd)
1216 void (*pm_power_off)(void);
1217 EXPORT_SYMBOL(pm_power_off);
1218
1219 -void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
1220 +void (*arm_pm_restart)(char str, const char *cmd) __noreturn = arm_machine_restart;
1221 EXPORT_SYMBOL_GPL(arm_pm_restart);
1222
1223
1224 @@ -195,6 +195,7 @@ __setup("reboot=", reboot_setup);
1225
1226 void machine_halt(void)
1227 {
1228 + BUG();
1229 }
1230
1231
1232 @@ -202,6 +203,7 @@ void machine_power_off(void)
1233 {
1234 if (pm_power_off)
1235 pm_power_off();
1236 + BUG();
1237 }
1238
1239 void machine_restart(char *cmd)
1240 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1241 index c6c57b6..0c3b29e 100644
1242 --- a/arch/arm/kernel/setup.c
1243 +++ b/arch/arm/kernel/setup.c
1244 @@ -92,16 +92,16 @@ EXPORT_SYMBOL(elf_hwcap);
1245 struct processor processor;
1246 #endif
1247 #ifdef MULTI_TLB
1248 -struct cpu_tlb_fns cpu_tlb;
1249 +struct cpu_tlb_fns cpu_tlb __read_only;
1250 #endif
1251 #ifdef MULTI_USER
1252 -struct cpu_user_fns cpu_user;
1253 +struct cpu_user_fns cpu_user __read_only;
1254 #endif
1255 #ifdef MULTI_CACHE
1256 -struct cpu_cache_fns cpu_cache;
1257 +struct cpu_cache_fns cpu_cache __read_only;
1258 #endif
1259 #ifdef CONFIG_OUTER_CACHE
1260 -struct outer_cache_fns outer_cache;
1261 +struct outer_cache_fns outer_cache __read_only;
1262 #endif
1263
1264 struct stack {
1265 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1266 index 3f361a7..6e806e1 100644
1267 --- a/arch/arm/kernel/traps.c
1268 +++ b/arch/arm/kernel/traps.c
1269 @@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
1270
1271 DEFINE_SPINLOCK(die_lock);
1272
1273 +extern void gr_handle_kernel_exploit(void);
1274 +
1275 /*
1276 * This function is protected against re-entrancy.
1277 */
1278 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
1279 if (panic_on_oops)
1280 panic("Fatal exception");
1281
1282 + gr_handle_kernel_exploit();
1283 +
1284 do_exit(SIGSEGV);
1285 }
1286
1287 diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
1288 index aecf87df..bed731b 100644
1289 --- a/arch/arm/kernel/vmlinux.lds.S
1290 +++ b/arch/arm/kernel/vmlinux.lds.S
1291 @@ -74,14 +74,18 @@ SECTIONS
1292 #ifndef CONFIG_XIP_KERNEL
1293 __init_begin = _stext;
1294 INIT_DATA
1295 + EXIT_TEXT
1296 + EXIT_DATA
1297 . = ALIGN(PAGE_SIZE);
1298 __init_end = .;
1299 #endif
1300 }
1301
1302 /DISCARD/ : { /* Exit code and data */
1303 +#ifdef CONFIG_XIP_KERNEL
1304 EXIT_TEXT
1305 EXIT_DATA
1306 +#endif
1307 *(.exitcall.exit)
1308 *(.discard)
1309 *(.ARM.exidx.exit.text)
1310 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1311 index e4fe124..0fc246b 100644
1312 --- a/arch/arm/lib/copy_from_user.S
1313 +++ b/arch/arm/lib/copy_from_user.S
1314 @@ -16,7 +16,7 @@
1315 /*
1316 * Prototype:
1317 *
1318 - * size_t __copy_from_user(void *to, const void *from, size_t n)
1319 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
1320 *
1321 * Purpose:
1322 *
1323 @@ -84,11 +84,11 @@
1324
1325 .text
1326
1327 -ENTRY(__copy_from_user)
1328 +ENTRY(___copy_from_user)
1329
1330 #include "copy_template.S"
1331
1332 -ENDPROC(__copy_from_user)
1333 +ENDPROC(___copy_from_user)
1334
1335 .section .fixup,"ax"
1336 .align 0
1337 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1338 index 6ee2f67..d1cce76 100644
1339 --- a/arch/arm/lib/copy_page.S
1340 +++ b/arch/arm/lib/copy_page.S
1341 @@ -10,6 +10,7 @@
1342 * ASM optimised string functions
1343 */
1344 #include <linux/linkage.h>
1345 +#include <linux/const.h>
1346 #include <asm/assembler.h>
1347 #include <asm/asm-offsets.h>
1348 #include <asm/cache.h>
1349 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1350 index 1a71e15..ac7b258 100644
1351 --- a/arch/arm/lib/copy_to_user.S
1352 +++ b/arch/arm/lib/copy_to_user.S
1353 @@ -16,7 +16,7 @@
1354 /*
1355 * Prototype:
1356 *
1357 - * size_t __copy_to_user(void *to, const void *from, size_t n)
1358 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
1359 *
1360 * Purpose:
1361 *
1362 @@ -88,11 +88,11 @@
1363 .text
1364
1365 ENTRY(__copy_to_user_std)
1366 -WEAK(__copy_to_user)
1367 +WEAK(___copy_to_user)
1368
1369 #include "copy_template.S"
1370
1371 -ENDPROC(__copy_to_user)
1372 +ENDPROC(___copy_to_user)
1373
1374 .section .fixup,"ax"
1375 .align 0
1376 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1377 index ffdd274..91017b6 100644
1378 --- a/arch/arm/lib/uaccess.S
1379 +++ b/arch/arm/lib/uaccess.S
1380 @@ -19,7 +19,7 @@
1381
1382 #define PAGE_SHIFT 12
1383
1384 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1385 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1386 * Purpose : copy a block to user memory from kernel memory
1387 * Params : to - user memory
1388 * : from - kernel memory
1389 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
1390 sub r2, r2, ip
1391 b .Lc2u_dest_aligned
1392
1393 -ENTRY(__copy_to_user)
1394 +ENTRY(___copy_to_user)
1395 stmfd sp!, {r2, r4 - r7, lr}
1396 cmp r2, #4
1397 blt .Lc2u_not_enough
1398 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
1399 ldrgtb r3, [r1], #0
1400 USER( strgtbt r3, [r0], #1) @ May fault
1401 b .Lc2u_finished
1402 -ENDPROC(__copy_to_user)
1403 +ENDPROC(___copy_to_user)
1404
1405 .section .fixup,"ax"
1406 .align 0
1407 9001: ldmfd sp!, {r0, r4 - r7, pc}
1408 .previous
1409
1410 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1411 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1412 * Purpose : copy a block from user memory to kernel memory
1413 * Params : to - kernel memory
1414 * : from - user memory
1415 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
1416 sub r2, r2, ip
1417 b .Lcfu_dest_aligned
1418
1419 -ENTRY(__copy_from_user)
1420 +ENTRY(___copy_from_user)
1421 stmfd sp!, {r0, r2, r4 - r7, lr}
1422 cmp r2, #4
1423 blt .Lcfu_not_enough
1424 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
1425 USER( ldrgtbt r3, [r1], #1) @ May fault
1426 strgtb r3, [r0], #1
1427 b .Lcfu_finished
1428 -ENDPROC(__copy_from_user)
1429 +ENDPROC(___copy_from_user)
1430
1431 .section .fixup,"ax"
1432 .align 0
1433 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1434 index 6b967ff..67d5b2b 100644
1435 --- a/arch/arm/lib/uaccess_with_memcpy.c
1436 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1437 @@ -97,7 +97,7 @@ out:
1438 }
1439
1440 unsigned long
1441 -__copy_to_user(void __user *to, const void *from, unsigned long n)
1442 +___copy_to_user(void __user *to, const void *from, unsigned long n)
1443 {
1444 /*
1445 * This test is stubbed out of the main function above to keep
1446 diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
1447 index 4028724..beec230 100644
1448 --- a/arch/arm/mach-at91/pm.c
1449 +++ b/arch/arm/mach-at91/pm.c
1450 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
1451 }
1452
1453
1454 -static struct platform_suspend_ops at91_pm_ops ={
1455 +static const struct platform_suspend_ops at91_pm_ops ={
1456 .valid = at91_pm_valid_state,
1457 .begin = at91_pm_begin,
1458 .enter = at91_pm_enter,
1459 diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
1460 index 5218943..0a34552 100644
1461 --- a/arch/arm/mach-omap1/pm.c
1462 +++ b/arch/arm/mach-omap1/pm.c
1463 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
1464
1465
1466
1467 -static struct platform_suspend_ops omap_pm_ops ={
1468 +static const struct platform_suspend_ops omap_pm_ops ={
1469 .prepare = omap_pm_prepare,
1470 .enter = omap_pm_enter,
1471 .finish = omap_pm_finish,
1472 diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
1473 index bff5c4e..d4c649b 100644
1474 --- a/arch/arm/mach-omap2/pm24xx.c
1475 +++ b/arch/arm/mach-omap2/pm24xx.c
1476 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
1477 enable_hlt();
1478 }
1479
1480 -static struct platform_suspend_ops omap_pm_ops = {
1481 +static const struct platform_suspend_ops omap_pm_ops = {
1482 .prepare = omap2_pm_prepare,
1483 .enter = omap2_pm_enter,
1484 .finish = omap2_pm_finish,
1485 diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
1486 index 8946319..7d3e661 100644
1487 --- a/arch/arm/mach-omap2/pm34xx.c
1488 +++ b/arch/arm/mach-omap2/pm34xx.c
1489 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
1490 return;
1491 }
1492
1493 -static struct platform_suspend_ops omap_pm_ops = {
1494 +static const struct platform_suspend_ops omap_pm_ops = {
1495 .begin = omap3_pm_begin,
1496 .end = omap3_pm_end,
1497 .prepare = omap3_pm_prepare,
1498 diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
1499 index b3d8d53..6e68ebc 100644
1500 --- a/arch/arm/mach-pnx4008/pm.c
1501 +++ b/arch/arm/mach-pnx4008/pm.c
1502 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
1503 (state == PM_SUSPEND_MEM);
1504 }
1505
1506 -static struct platform_suspend_ops pnx4008_pm_ops = {
1507 +static const struct platform_suspend_ops pnx4008_pm_ops = {
1508 .enter = pnx4008_pm_enter,
1509 .valid = pnx4008_pm_valid,
1510 };
1511 diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
1512 index 7693355..9beb00a 100644
1513 --- a/arch/arm/mach-pxa/pm.c
1514 +++ b/arch/arm/mach-pxa/pm.c
1515 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
1516 pxa_cpu_pm_fns->finish();
1517 }
1518
1519 -static struct platform_suspend_ops pxa_pm_ops = {
1520 +static const struct platform_suspend_ops pxa_pm_ops = {
1521 .valid = pxa_pm_valid,
1522 .enter = pxa_pm_enter,
1523 .prepare = pxa_pm_prepare,
1524 diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
1525 index 629e05d..06be589 100644
1526 --- a/arch/arm/mach-pxa/sharpsl_pm.c
1527 +++ b/arch/arm/mach-pxa/sharpsl_pm.c
1528 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1529 }
1530
1531 #ifdef CONFIG_PM
1532 -static struct platform_suspend_ops sharpsl_pm_ops = {
1533 +static const struct platform_suspend_ops sharpsl_pm_ops = {
1534 .prepare = pxa_pm_prepare,
1535 .finish = pxa_pm_finish,
1536 .enter = corgi_pxa_pm_enter,
1537 diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1538 index c83fdc8..ab9fc44 100644
1539 --- a/arch/arm/mach-sa1100/pm.c
1540 +++ b/arch/arm/mach-sa1100/pm.c
1541 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1542 return virt_to_phys(sp);
1543 }
1544
1545 -static struct platform_suspend_ops sa11x0_pm_ops = {
1546 +static const struct platform_suspend_ops sa11x0_pm_ops = {
1547 .enter = sa11x0_pm_enter,
1548 .valid = suspend_valid_only_mem,
1549 };
1550 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1551 index 3191cd6..c322981 100644
1552 --- a/arch/arm/mm/fault.c
1553 +++ b/arch/arm/mm/fault.c
1554 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1555 }
1556 #endif
1557
1558 +#ifdef CONFIG_PAX_PAGEEXEC
1559 + if (fsr & FSR_LNX_PF) {
1560 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1561 + do_group_exit(SIGKILL);
1562 + }
1563 +#endif
1564 +
1565 tsk->thread.address = addr;
1566 tsk->thread.error_code = fsr;
1567 tsk->thread.trap_no = 14;
1568 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1569 }
1570 #endif /* CONFIG_MMU */
1571
1572 +#ifdef CONFIG_PAX_PAGEEXEC
1573 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1574 +{
1575 + long i;
1576 +
1577 + printk(KERN_ERR "PAX: bytes at PC: ");
1578 + for (i = 0; i < 20; i++) {
1579 + unsigned char c;
1580 + if (get_user(c, (__force unsigned char __user *)pc+i))
1581 + printk(KERN_CONT "?? ");
1582 + else
1583 + printk(KERN_CONT "%02x ", c);
1584 + }
1585 + printk("\n");
1586 +
1587 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1588 + for (i = -1; i < 20; i++) {
1589 + unsigned long c;
1590 + if (get_user(c, (__force unsigned long __user *)sp+i))
1591 + printk(KERN_CONT "???????? ");
1592 + else
1593 + printk(KERN_CONT "%08lx ", c);
1594 + }
1595 + printk("\n");
1596 +}
1597 +#endif
1598 +
1599 /*
1600 * First Level Translation Fault Handler
1601 *
1602 @@ -569,6 +603,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1603 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1604 struct siginfo info;
1605
1606 +#ifdef CONFIG_PAX_REFCOUNT
1607 + if (fsr_fs(ifsr) == 2) {
1608 + unsigned int bkpt;
1609 +
1610 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1611 + current->thread.error_code = ifsr;
1612 + current->thread.trap_no = 0;
1613 + pax_report_refcount_overflow(regs);
1614 + fixup_exception(regs);
1615 + return;
1616 + }
1617 + }
1618 +#endif
1619 +
1620 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1621 return;
1622
1623 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1624 index f5abc51..7ec524c 100644
1625 --- a/arch/arm/mm/mmap.c
1626 +++ b/arch/arm/mm/mmap.c
1627 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1628 if (len > TASK_SIZE)
1629 return -ENOMEM;
1630
1631 +#ifdef CONFIG_PAX_RANDMMAP
1632 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1633 +#endif
1634 +
1635 if (addr) {
1636 if (do_align)
1637 addr = COLOUR_ALIGN(addr, pgoff);
1638 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1639 addr = PAGE_ALIGN(addr);
1640
1641 vma = find_vma(mm, addr);
1642 - if (TASK_SIZE - len >= addr &&
1643 - (!vma || addr + len <= vma->vm_start))
1644 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1645 return addr;
1646 }
1647 if (len > mm->cached_hole_size) {
1648 - start_addr = addr = mm->free_area_cache;
1649 + start_addr = addr = mm->free_area_cache;
1650 } else {
1651 - start_addr = addr = TASK_UNMAPPED_BASE;
1652 - mm->cached_hole_size = 0;
1653 + start_addr = addr = mm->mmap_base;
1654 + mm->cached_hole_size = 0;
1655 }
1656
1657 full_search:
1658 @@ -94,14 +97,14 @@ full_search:
1659 * Start a new search - just in case we missed
1660 * some holes.
1661 */
1662 - if (start_addr != TASK_UNMAPPED_BASE) {
1663 - start_addr = addr = TASK_UNMAPPED_BASE;
1664 + if (start_addr != mm->mmap_base) {
1665 + start_addr = addr = mm->mmap_base;
1666 mm->cached_hole_size = 0;
1667 goto full_search;
1668 }
1669 return -ENOMEM;
1670 }
1671 - if (!vma || addr + len <= vma->vm_start) {
1672 + if (check_heap_stack_gap(vma, addr, len)) {
1673 /*
1674 * Remember the place where we stopped the search:
1675 */
1676 diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1677 index 8d97db2..b66cfa5 100644
1678 --- a/arch/arm/plat-s3c/pm.c
1679 +++ b/arch/arm/plat-s3c/pm.c
1680 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1681 s3c_pm_check_cleanup();
1682 }
1683
1684 -static struct platform_suspend_ops s3c_pm_ops = {
1685 +static const struct platform_suspend_ops s3c_pm_ops = {
1686 .enter = s3c_pm_enter,
1687 .prepare = s3c_pm_prepare,
1688 .finish = s3c_pm_finish,
1689 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
1690 index d3cf35a..0ba6053 100644
1691 --- a/arch/avr32/include/asm/cache.h
1692 +++ b/arch/avr32/include/asm/cache.h
1693 @@ -1,8 +1,10 @@
1694 #ifndef __ASM_AVR32_CACHE_H
1695 #define __ASM_AVR32_CACHE_H
1696
1697 +#include <linux/const.h>
1698 +
1699 #define L1_CACHE_SHIFT 5
1700 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1701 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1702
1703 /*
1704 * Memory returned by kmalloc() may be used for DMA, so we must make
1705 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1706 index d5d1d41..856e2ed 100644
1707 --- a/arch/avr32/include/asm/elf.h
1708 +++ b/arch/avr32/include/asm/elf.h
1709 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1710 the loader. We need to make sure that it is out of the way of the program
1711 that it will "exec", and that there is sufficient room for the brk. */
1712
1713 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1714 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1715
1716 +#ifdef CONFIG_PAX_ASLR
1717 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1718 +
1719 +#define PAX_DELTA_MMAP_LEN 15
1720 +#define PAX_DELTA_STACK_LEN 15
1721 +#endif
1722
1723 /* This yields a mask that user programs can use to figure out what
1724 instruction set this CPU supports. This could be done in user space,
1725 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1726 index b7f5c68..556135c 100644
1727 --- a/arch/avr32/include/asm/kmap_types.h
1728 +++ b/arch/avr32/include/asm/kmap_types.h
1729 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1730 D(11) KM_IRQ1,
1731 D(12) KM_SOFTIRQ0,
1732 D(13) KM_SOFTIRQ1,
1733 -D(14) KM_TYPE_NR
1734 +D(14) KM_CLEARPAGE,
1735 +D(15) KM_TYPE_NR
1736 };
1737
1738 #undef D
1739 diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1740 index f021edf..32d680e 100644
1741 --- a/arch/avr32/mach-at32ap/pm.c
1742 +++ b/arch/avr32/mach-at32ap/pm.c
1743 @@ -176,7 +176,7 @@ out:
1744 return 0;
1745 }
1746
1747 -static struct platform_suspend_ops avr32_pm_ops = {
1748 +static const struct platform_suspend_ops avr32_pm_ops = {
1749 .valid = avr32_pm_valid_state,
1750 .enter = avr32_pm_enter,
1751 };
1752 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1753 index b61d86d..e292c7f 100644
1754 --- a/arch/avr32/mm/fault.c
1755 +++ b/arch/avr32/mm/fault.c
1756 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1757
1758 int exception_trace = 1;
1759
1760 +#ifdef CONFIG_PAX_PAGEEXEC
1761 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1762 +{
1763 + unsigned long i;
1764 +
1765 + printk(KERN_ERR "PAX: bytes at PC: ");
1766 + for (i = 0; i < 20; i++) {
1767 + unsigned char c;
1768 + if (get_user(c, (unsigned char *)pc+i))
1769 + printk(KERN_CONT "???????? ");
1770 + else
1771 + printk(KERN_CONT "%02x ", c);
1772 + }
1773 + printk("\n");
1774 +}
1775 +#endif
1776 +
1777 /*
1778 * This routine handles page faults. It determines the address and the
1779 * problem, and then passes it off to one of the appropriate routines.
1780 @@ -157,6 +174,16 @@ bad_area:
1781 up_read(&mm->mmap_sem);
1782
1783 if (user_mode(regs)) {
1784 +
1785 +#ifdef CONFIG_PAX_PAGEEXEC
1786 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1787 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1788 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1789 + do_group_exit(SIGKILL);
1790 + }
1791 + }
1792 +#endif
1793 +
1794 if (exception_trace && printk_ratelimit())
1795 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1796 "sp %08lx ecr %lu\n",
1797 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
1798 index 93f6c63..d144953 100644
1799 --- a/arch/blackfin/include/asm/cache.h
1800 +++ b/arch/blackfin/include/asm/cache.h
1801 @@ -7,12 +7,14 @@
1802 #ifndef __ARCH_BLACKFIN_CACHE_H
1803 #define __ARCH_BLACKFIN_CACHE_H
1804
1805 +#include <linux/const.h>
1806 +
1807 /*
1808 * Bytes per L1 cache line
1809 * Blackfin loads 32 bytes for cache
1810 */
1811 #define L1_CACHE_SHIFT 5
1812 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1813 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1814 #define SMP_CACHE_BYTES L1_CACHE_BYTES
1815
1816 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
1817 diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1818 index cce79d0..c406c85 100644
1819 --- a/arch/blackfin/kernel/kgdb.c
1820 +++ b/arch/blackfin/kernel/kgdb.c
1821 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1822 return -1; /* this means that we do not want to exit from the handler */
1823 }
1824
1825 -struct kgdb_arch arch_kgdb_ops = {
1826 +const struct kgdb_arch arch_kgdb_ops = {
1827 .gdb_bpt_instr = {0xa1},
1828 #ifdef CONFIG_SMP
1829 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1830 diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1831 index 8837be4..b2fb413 100644
1832 --- a/arch/blackfin/mach-common/pm.c
1833 +++ b/arch/blackfin/mach-common/pm.c
1834 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1835 return 0;
1836 }
1837
1838 -struct platform_suspend_ops bfin_pm_ops = {
1839 +const struct platform_suspend_ops bfin_pm_ops = {
1840 .enter = bfin_pm_enter,
1841 .valid = bfin_pm_valid,
1842 };
1843 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
1844 index aea2718..3639a60 100644
1845 --- a/arch/cris/include/arch-v10/arch/cache.h
1846 +++ b/arch/cris/include/arch-v10/arch/cache.h
1847 @@ -1,8 +1,9 @@
1848 #ifndef _ASM_ARCH_CACHE_H
1849 #define _ASM_ARCH_CACHE_H
1850
1851 +#include <linux/const.h>
1852 /* Etrax 100LX have 32-byte cache-lines. */
1853 -#define L1_CACHE_BYTES 32
1854 #define L1_CACHE_SHIFT 5
1855 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1856
1857 #endif /* _ASM_ARCH_CACHE_H */
1858 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
1859 index dfc7305..417f5b3 100644
1860 --- a/arch/cris/include/arch-v32/arch/cache.h
1861 +++ b/arch/cris/include/arch-v32/arch/cache.h
1862 @@ -1,11 +1,12 @@
1863 #ifndef _ASM_CRIS_ARCH_CACHE_H
1864 #define _ASM_CRIS_ARCH_CACHE_H
1865
1866 +#include <linux/const.h>
1867 #include <arch/hwregs/dma.h>
1868
1869 /* A cache-line is 32 bytes. */
1870 -#define L1_CACHE_BYTES 32
1871 #define L1_CACHE_SHIFT 5
1872 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1873
1874 void flush_dma_list(dma_descr_data *descr);
1875 void flush_dma_descr(dma_descr_data *descr, int flush_buf);
1876 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1877 index 00a57af..c3ef0cd 100644
1878 --- a/arch/frv/include/asm/atomic.h
1879 +++ b/arch/frv/include/asm/atomic.h
1880 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1881 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1882 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1883
1884 +#define atomic64_read_unchecked(v) atomic64_read(v)
1885 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1886 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1887 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1888 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1889 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1890 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1891 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1892 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1893 +
1894 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
1895 {
1896 int c, old;
1897 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
1898 index 7dc0f0f..1e6a620 100644
1899 --- a/arch/frv/include/asm/cache.h
1900 +++ b/arch/frv/include/asm/cache.h
1901 @@ -12,10 +12,11 @@
1902 #ifndef __ASM_CACHE_H
1903 #define __ASM_CACHE_H
1904
1905 +#include <linux/const.h>
1906
1907 /* bytes per L1 cache line */
1908 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
1909 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1910 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1911
1912 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
1913
1914 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1915 index f8e16b2..c73ff79 100644
1916 --- a/arch/frv/include/asm/kmap_types.h
1917 +++ b/arch/frv/include/asm/kmap_types.h
1918 @@ -23,6 +23,7 @@ enum km_type {
1919 KM_IRQ1,
1920 KM_SOFTIRQ0,
1921 KM_SOFTIRQ1,
1922 + KM_CLEARPAGE,
1923 KM_TYPE_NR
1924 };
1925
1926 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1927 index 385fd30..6c3d97e 100644
1928 --- a/arch/frv/mm/elf-fdpic.c
1929 +++ b/arch/frv/mm/elf-fdpic.c
1930 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1931 if (addr) {
1932 addr = PAGE_ALIGN(addr);
1933 vma = find_vma(current->mm, addr);
1934 - if (TASK_SIZE - len >= addr &&
1935 - (!vma || addr + len <= vma->vm_start))
1936 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1937 goto success;
1938 }
1939
1940 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1941 for (; vma; vma = vma->vm_next) {
1942 if (addr > limit)
1943 break;
1944 - if (addr + len <= vma->vm_start)
1945 + if (check_heap_stack_gap(vma, addr, len))
1946 goto success;
1947 addr = vma->vm_end;
1948 }
1949 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1950 for (; vma; vma = vma->vm_next) {
1951 if (addr > limit)
1952 break;
1953 - if (addr + len <= vma->vm_start)
1954 + if (check_heap_stack_gap(vma, addr, len))
1955 goto success;
1956 addr = vma->vm_end;
1957 }
1958 diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
1959 index c635028..6d9445a 100644
1960 --- a/arch/h8300/include/asm/cache.h
1961 +++ b/arch/h8300/include/asm/cache.h
1962 @@ -1,8 +1,10 @@
1963 #ifndef __ARCH_H8300_CACHE_H
1964 #define __ARCH_H8300_CACHE_H
1965
1966 +#include <linux/const.h>
1967 +
1968 /* bytes per L1 cache line */
1969 -#define L1_CACHE_BYTES 4
1970 +#define L1_CACHE_BYTES _AC(4,UL)
1971
1972 /* m68k-elf-gcc 2.95.2 doesn't like these */
1973
1974 diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1975 index e4a80d8..11a7ea1 100644
1976 --- a/arch/ia64/hp/common/hwsw_iommu.c
1977 +++ b/arch/ia64/hp/common/hwsw_iommu.c
1978 @@ -17,7 +17,7 @@
1979 #include <linux/swiotlb.h>
1980 #include <asm/machvec.h>
1981
1982 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1983 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1984
1985 /* swiotlb declarations & definitions: */
1986 extern int swiotlb_late_init_with_default_size (size_t size);
1987 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1988 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1989 }
1990
1991 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1992 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1993 {
1994 if (use_swiotlb(dev))
1995 return &swiotlb_dma_ops;
1996 diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1997 index 01ae69b..35752fd 100644
1998 --- a/arch/ia64/hp/common/sba_iommu.c
1999 +++ b/arch/ia64/hp/common/sba_iommu.c
2000 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
2001 },
2002 };
2003
2004 -extern struct dma_map_ops swiotlb_dma_ops;
2005 +extern const struct dma_map_ops swiotlb_dma_ops;
2006
2007 static int __init
2008 sba_init(void)
2009 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
2010
2011 __setup("sbapagesize=",sba_page_override);
2012
2013 -struct dma_map_ops sba_dma_ops = {
2014 +const struct dma_map_ops sba_dma_ops = {
2015 .alloc_coherent = sba_alloc_coherent,
2016 .free_coherent = sba_free_coherent,
2017 .map_page = sba_map_page,
2018 diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
2019 index c69552b..c7122f4 100644
2020 --- a/arch/ia64/ia32/binfmt_elf32.c
2021 +++ b/arch/ia64/ia32/binfmt_elf32.c
2022 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
2023
2024 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
2025
2026 +#ifdef CONFIG_PAX_ASLR
2027 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2028 +
2029 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2030 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2031 +#endif
2032 +
2033 /* Ugly but avoids duplication */
2034 #include "../../../fs/binfmt_elf.c"
2035
2036 diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
2037 index 0f15349..26b3429 100644
2038 --- a/arch/ia64/ia32/ia32priv.h
2039 +++ b/arch/ia64/ia32/ia32priv.h
2040 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
2041 #define ELF_DATA ELFDATA2LSB
2042 #define ELF_ARCH EM_386
2043
2044 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
2045 +#ifdef CONFIG_PAX_RANDUSTACK
2046 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
2047 +#else
2048 +#define __IA32_DELTA_STACK 0UL
2049 +#endif
2050 +
2051 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
2052 +
2053 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
2054 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
2055
2056 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2057 index 88405cb..de5ca5d 100644
2058 --- a/arch/ia64/include/asm/atomic.h
2059 +++ b/arch/ia64/include/asm/atomic.h
2060 @@ -210,6 +210,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2061 #define atomic64_inc(v) atomic64_add(1, (v))
2062 #define atomic64_dec(v) atomic64_sub(1, (v))
2063
2064 +#define atomic64_read_unchecked(v) atomic64_read(v)
2065 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2066 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2067 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2068 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2069 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2070 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2071 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2072 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2073 +
2074 /* Atomic operations are already serializing */
2075 #define smp_mb__before_atomic_dec() barrier()
2076 #define smp_mb__after_atomic_dec() barrier()
2077 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2078 index e7482bd..d1c9b8e 100644
2079 --- a/arch/ia64/include/asm/cache.h
2080 +++ b/arch/ia64/include/asm/cache.h
2081 @@ -1,6 +1,7 @@
2082 #ifndef _ASM_IA64_CACHE_H
2083 #define _ASM_IA64_CACHE_H
2084
2085 +#include <linux/const.h>
2086
2087 /*
2088 * Copyright (C) 1998-2000 Hewlett-Packard Co
2089 @@ -9,7 +10,7 @@
2090
2091 /* Bytes per L1 (data) cache line. */
2092 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2093 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2094 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2095
2096 #ifdef CONFIG_SMP
2097 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2098 diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
2099 index 8d3c79c..71b3af6 100644
2100 --- a/arch/ia64/include/asm/dma-mapping.h
2101 +++ b/arch/ia64/include/asm/dma-mapping.h
2102 @@ -12,7 +12,7 @@
2103
2104 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
2105
2106 -extern struct dma_map_ops *dma_ops;
2107 +extern const struct dma_map_ops *dma_ops;
2108 extern struct ia64_machine_vector ia64_mv;
2109 extern void set_iommu_machvec(void);
2110
2111 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
2112 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2113 dma_addr_t *daddr, gfp_t gfp)
2114 {
2115 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
2116 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2117 void *caddr;
2118
2119 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
2120 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2121 static inline void dma_free_coherent(struct device *dev, size_t size,
2122 void *caddr, dma_addr_t daddr)
2123 {
2124 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
2125 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2126 debug_dma_free_coherent(dev, size, caddr, daddr);
2127 ops->free_coherent(dev, size, caddr, daddr);
2128 }
2129 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2130
2131 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
2132 {
2133 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
2134 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2135 return ops->mapping_error(dev, daddr);
2136 }
2137
2138 static inline int dma_supported(struct device *dev, u64 mask)
2139 {
2140 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
2141 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2142 return ops->dma_supported(dev, mask);
2143 }
2144
2145 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2146 index 86eddee..b116bb4 100644
2147 --- a/arch/ia64/include/asm/elf.h
2148 +++ b/arch/ia64/include/asm/elf.h
2149 @@ -43,6 +43,13 @@
2150 */
2151 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2152
2153 +#ifdef CONFIG_PAX_ASLR
2154 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2155 +
2156 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2157 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2158 +#endif
2159 +
2160 #define PT_IA_64_UNWIND 0x70000001
2161
2162 /* IA-64 relocations: */
2163 diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
2164 index 367d299..9ad4279 100644
2165 --- a/arch/ia64/include/asm/machvec.h
2166 +++ b/arch/ia64/include/asm/machvec.h
2167 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
2168 /* DMA-mapping interface: */
2169 typedef void ia64_mv_dma_init (void);
2170 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
2171 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
2172 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
2173
2174 /*
2175 * WARNING: The legacy I/O space is _architected_. Platforms are
2176 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
2177 # endif /* CONFIG_IA64_GENERIC */
2178
2179 extern void swiotlb_dma_init(void);
2180 -extern struct dma_map_ops *dma_get_ops(struct device *);
2181 +extern const struct dma_map_ops *dma_get_ops(struct device *);
2182
2183 /*
2184 * Define default versions so we can extend machvec for new platforms without having
2185 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2186 index 8840a69..cdb63d9 100644
2187 --- a/arch/ia64/include/asm/pgtable.h
2188 +++ b/arch/ia64/include/asm/pgtable.h
2189 @@ -12,7 +12,7 @@
2190 * David Mosberger-Tang <davidm@hpl.hp.com>
2191 */
2192
2193 -
2194 +#include <linux/const.h>
2195 #include <asm/mman.h>
2196 #include <asm/page.h>
2197 #include <asm/processor.h>
2198 @@ -143,6 +143,17 @@
2199 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2200 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2201 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2202 +
2203 +#ifdef CONFIG_PAX_PAGEEXEC
2204 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2205 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2206 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2207 +#else
2208 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2209 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2210 +# define PAGE_COPY_NOEXEC PAGE_COPY
2211 +#endif
2212 +
2213 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2214 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2215 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2216 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2217 index 239ecdc..f94170e 100644
2218 --- a/arch/ia64/include/asm/spinlock.h
2219 +++ b/arch/ia64/include/asm/spinlock.h
2220 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
2221 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2222
2223 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2224 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2225 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2226 }
2227
2228 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
2229 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2230 index 449c8c0..432a3d2 100644
2231 --- a/arch/ia64/include/asm/uaccess.h
2232 +++ b/arch/ia64/include/asm/uaccess.h
2233 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2234 const void *__cu_from = (from); \
2235 long __cu_len = (n); \
2236 \
2237 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
2238 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2239 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2240 __cu_len; \
2241 })
2242 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2243 long __cu_len = (n); \
2244 \
2245 __chk_user_ptr(__cu_from); \
2246 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
2247 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2248 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2249 __cu_len; \
2250 })
2251 diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
2252 index f2c1600..969398a 100644
2253 --- a/arch/ia64/kernel/dma-mapping.c
2254 +++ b/arch/ia64/kernel/dma-mapping.c
2255 @@ -3,7 +3,7 @@
2256 /* Set this to 1 if there is a HW IOMMU in the system */
2257 int iommu_detected __read_mostly;
2258
2259 -struct dma_map_ops *dma_ops;
2260 +const struct dma_map_ops *dma_ops;
2261 EXPORT_SYMBOL(dma_ops);
2262
2263 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
2264 @@ -16,7 +16,7 @@ static int __init dma_init(void)
2265 }
2266 fs_initcall(dma_init);
2267
2268 -struct dma_map_ops *dma_get_ops(struct device *dev)
2269 +const struct dma_map_ops *dma_get_ops(struct device *dev)
2270 {
2271 return dma_ops;
2272 }
2273 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2274 index 1481b0a..e7d38ff 100644
2275 --- a/arch/ia64/kernel/module.c
2276 +++ b/arch/ia64/kernel/module.c
2277 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
2278 void
2279 module_free (struct module *mod, void *module_region)
2280 {
2281 - if (mod && mod->arch.init_unw_table &&
2282 - module_region == mod->module_init) {
2283 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2284 unw_remove_unwind_table(mod->arch.init_unw_table);
2285 mod->arch.init_unw_table = NULL;
2286 }
2287 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2288 }
2289
2290 static inline int
2291 +in_init_rx (const struct module *mod, uint64_t addr)
2292 +{
2293 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2294 +}
2295 +
2296 +static inline int
2297 +in_init_rw (const struct module *mod, uint64_t addr)
2298 +{
2299 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2300 +}
2301 +
2302 +static inline int
2303 in_init (const struct module *mod, uint64_t addr)
2304 {
2305 - return addr - (uint64_t) mod->module_init < mod->init_size;
2306 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2307 +}
2308 +
2309 +static inline int
2310 +in_core_rx (const struct module *mod, uint64_t addr)
2311 +{
2312 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2313 +}
2314 +
2315 +static inline int
2316 +in_core_rw (const struct module *mod, uint64_t addr)
2317 +{
2318 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2319 }
2320
2321 static inline int
2322 in_core (const struct module *mod, uint64_t addr)
2323 {
2324 - return addr - (uint64_t) mod->module_core < mod->core_size;
2325 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2326 }
2327
2328 static inline int
2329 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2330 break;
2331
2332 case RV_BDREL:
2333 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2334 + if (in_init_rx(mod, val))
2335 + val -= (uint64_t) mod->module_init_rx;
2336 + else if (in_init_rw(mod, val))
2337 + val -= (uint64_t) mod->module_init_rw;
2338 + else if (in_core_rx(mod, val))
2339 + val -= (uint64_t) mod->module_core_rx;
2340 + else if (in_core_rw(mod, val))
2341 + val -= (uint64_t) mod->module_core_rw;
2342 break;
2343
2344 case RV_LTV:
2345 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2346 * addresses have been selected...
2347 */
2348 uint64_t gp;
2349 - if (mod->core_size > MAX_LTOFF)
2350 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2351 /*
2352 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2353 * at the end of the module.
2354 */
2355 - gp = mod->core_size - MAX_LTOFF / 2;
2356 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2357 else
2358 - gp = mod->core_size / 2;
2359 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2360 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2361 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2362 mod->arch.gp = gp;
2363 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2364 }
2365 diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
2366 index f6b1ff0..de773fb 100644
2367 --- a/arch/ia64/kernel/pci-dma.c
2368 +++ b/arch/ia64/kernel/pci-dma.c
2369 @@ -43,7 +43,7 @@ struct device fallback_dev = {
2370 .dma_mask = &fallback_dev.coherent_dma_mask,
2371 };
2372
2373 -extern struct dma_map_ops intel_dma_ops;
2374 +extern const struct dma_map_ops intel_dma_ops;
2375
2376 static int __init pci_iommu_init(void)
2377 {
2378 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
2379 }
2380 EXPORT_SYMBOL(iommu_dma_supported);
2381
2382 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
2383 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
2384 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
2385 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
2386 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
2387 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
2388 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
2389 +
2390 +static const struct dma_map_ops intel_iommu_dma_ops = {
2391 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
2392 + .alloc_coherent = intel_alloc_coherent,
2393 + .free_coherent = intel_free_coherent,
2394 + .map_sg = intel_map_sg,
2395 + .unmap_sg = intel_unmap_sg,
2396 + .map_page = intel_map_page,
2397 + .unmap_page = intel_unmap_page,
2398 + .mapping_error = intel_mapping_error,
2399 +
2400 + .sync_single_for_cpu = machvec_dma_sync_single,
2401 + .sync_sg_for_cpu = machvec_dma_sync_sg,
2402 + .sync_single_for_device = machvec_dma_sync_single,
2403 + .sync_sg_for_device = machvec_dma_sync_sg,
2404 + .dma_supported = iommu_dma_supported,
2405 +};
2406 +
2407 void __init pci_iommu_alloc(void)
2408 {
2409 - dma_ops = &intel_dma_ops;
2410 -
2411 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
2412 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
2413 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
2414 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
2415 - dma_ops->dma_supported = iommu_dma_supported;
2416 + dma_ops = &intel_iommu_dma_ops;
2417
2418 /*
2419 * The order of these functions is important for
2420 diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
2421 index 285aae8..61dbab6 100644
2422 --- a/arch/ia64/kernel/pci-swiotlb.c
2423 +++ b/arch/ia64/kernel/pci-swiotlb.c
2424 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
2425 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
2426 }
2427
2428 -struct dma_map_ops swiotlb_dma_ops = {
2429 +const struct dma_map_ops swiotlb_dma_ops = {
2430 .alloc_coherent = ia64_swiotlb_alloc_coherent,
2431 .free_coherent = swiotlb_free_coherent,
2432 .map_page = swiotlb_map_page,
2433 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2434 index 609d500..7dde2a8 100644
2435 --- a/arch/ia64/kernel/sys_ia64.c
2436 +++ b/arch/ia64/kernel/sys_ia64.c
2437 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2438 if (REGION_NUMBER(addr) == RGN_HPAGE)
2439 addr = 0;
2440 #endif
2441 +
2442 +#ifdef CONFIG_PAX_RANDMMAP
2443 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2444 + addr = mm->free_area_cache;
2445 + else
2446 +#endif
2447 +
2448 if (!addr)
2449 addr = mm->free_area_cache;
2450
2451 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2452 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2453 /* At this point: (!vma || addr < vma->vm_end). */
2454 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2455 - if (start_addr != TASK_UNMAPPED_BASE) {
2456 + if (start_addr != mm->mmap_base) {
2457 /* Start a new search --- just in case we missed some holes. */
2458 - addr = TASK_UNMAPPED_BASE;
2459 + addr = mm->mmap_base;
2460 goto full_search;
2461 }
2462 return -ENOMEM;
2463 }
2464 - if (!vma || addr + len <= vma->vm_start) {
2465 + if (check_heap_stack_gap(vma, addr, len)) {
2466 /* Remember the address where we stopped this search: */
2467 mm->free_area_cache = addr + len;
2468 return addr;
2469 diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
2470 index 8f06035..b3a5818 100644
2471 --- a/arch/ia64/kernel/topology.c
2472 +++ b/arch/ia64/kernel/topology.c
2473 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
2474 return ret;
2475 }
2476
2477 -static struct sysfs_ops cache_sysfs_ops = {
2478 +static const struct sysfs_ops cache_sysfs_ops = {
2479 .show = cache_show
2480 };
2481
2482 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2483 index 0a0c77b..8e55a81 100644
2484 --- a/arch/ia64/kernel/vmlinux.lds.S
2485 +++ b/arch/ia64/kernel/vmlinux.lds.S
2486 @@ -190,7 +190,7 @@ SECTIONS
2487 /* Per-cpu data: */
2488 . = ALIGN(PERCPU_PAGE_SIZE);
2489 PERCPU_VADDR(PERCPU_ADDR, :percpu)
2490 - __phys_per_cpu_start = __per_cpu_load;
2491 + __phys_per_cpu_start = per_cpu_load;
2492 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
2493 * into percpu page size
2494 */
2495 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2496 index 19261a9..1611b7a 100644
2497 --- a/arch/ia64/mm/fault.c
2498 +++ b/arch/ia64/mm/fault.c
2499 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2500 return pte_present(pte);
2501 }
2502
2503 +#ifdef CONFIG_PAX_PAGEEXEC
2504 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2505 +{
2506 + unsigned long i;
2507 +
2508 + printk(KERN_ERR "PAX: bytes at PC: ");
2509 + for (i = 0; i < 8; i++) {
2510 + unsigned int c;
2511 + if (get_user(c, (unsigned int *)pc+i))
2512 + printk(KERN_CONT "???????? ");
2513 + else
2514 + printk(KERN_CONT "%08x ", c);
2515 + }
2516 + printk("\n");
2517 +}
2518 +#endif
2519 +
2520 void __kprobes
2521 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2522 {
2523 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2524 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2525 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2526
2527 - if ((vma->vm_flags & mask) != mask)
2528 + if ((vma->vm_flags & mask) != mask) {
2529 +
2530 +#ifdef CONFIG_PAX_PAGEEXEC
2531 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2532 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2533 + goto bad_area;
2534 +
2535 + up_read(&mm->mmap_sem);
2536 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2537 + do_group_exit(SIGKILL);
2538 + }
2539 +#endif
2540 +
2541 goto bad_area;
2542
2543 + }
2544 +
2545 survive:
2546 /*
2547 * If for any reason at all we couldn't handle the fault, make
2548 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2549 index b0f6157..a082bbc 100644
2550 --- a/arch/ia64/mm/hugetlbpage.c
2551 +++ b/arch/ia64/mm/hugetlbpage.c
2552 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2553 /* At this point: (!vmm || addr < vmm->vm_end). */
2554 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2555 return -ENOMEM;
2556 - if (!vmm || (addr + len) <= vmm->vm_start)
2557 + if (check_heap_stack_gap(vmm, addr, len))
2558 return addr;
2559 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2560 }
2561 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2562 index 1857766..05cc6a3 100644
2563 --- a/arch/ia64/mm/init.c
2564 +++ b/arch/ia64/mm/init.c
2565 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
2566 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2567 vma->vm_end = vma->vm_start + PAGE_SIZE;
2568 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2569 +
2570 +#ifdef CONFIG_PAX_PAGEEXEC
2571 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2572 + vma->vm_flags &= ~VM_EXEC;
2573 +
2574 +#ifdef CONFIG_PAX_MPROTECT
2575 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
2576 + vma->vm_flags &= ~VM_MAYEXEC;
2577 +#endif
2578 +
2579 + }
2580 +#endif
2581 +
2582 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2583 down_write(&current->mm->mmap_sem);
2584 if (insert_vm_struct(current->mm, vma)) {
2585 diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
2586 index 98b6849..8046766 100644
2587 --- a/arch/ia64/sn/pci/pci_dma.c
2588 +++ b/arch/ia64/sn/pci/pci_dma.c
2589 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
2590 return ret;
2591 }
2592
2593 -static struct dma_map_ops sn_dma_ops = {
2594 +static const struct dma_map_ops sn_dma_ops = {
2595 .alloc_coherent = sn_dma_alloc_coherent,
2596 .free_coherent = sn_dma_free_coherent,
2597 .map_page = sn_dma_map_page,
2598 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2599 index 40b3ee9..8c2c112 100644
2600 --- a/arch/m32r/include/asm/cache.h
2601 +++ b/arch/m32r/include/asm/cache.h
2602 @@ -1,8 +1,10 @@
2603 #ifndef _ASM_M32R_CACHE_H
2604 #define _ASM_M32R_CACHE_H
2605
2606 +#include <linux/const.h>
2607 +
2608 /* L1 cache line size */
2609 #define L1_CACHE_SHIFT 4
2610 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2611 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2612
2613 #endif /* _ASM_M32R_CACHE_H */
2614 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2615 index 82abd15..d95ae5d 100644
2616 --- a/arch/m32r/lib/usercopy.c
2617 +++ b/arch/m32r/lib/usercopy.c
2618 @@ -14,6 +14,9 @@
2619 unsigned long
2620 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2621 {
2622 + if ((long)n < 0)
2623 + return n;
2624 +
2625 prefetch(from);
2626 if (access_ok(VERIFY_WRITE, to, n))
2627 __copy_user(to,from,n);
2628 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2629 unsigned long
2630 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2631 {
2632 + if ((long)n < 0)
2633 + return n;
2634 +
2635 prefetchw(to);
2636 if (access_ok(VERIFY_READ, from, n))
2637 __copy_user_zeroing(to,from,n);
2638 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2639 index ecafbe1..432c3e4 100644
2640 --- a/arch/m68k/include/asm/cache.h
2641 +++ b/arch/m68k/include/asm/cache.h
2642 @@ -4,9 +4,11 @@
2643 #ifndef __ARCH_M68K_CACHE_H
2644 #define __ARCH_M68K_CACHE_H
2645
2646 +#include <linux/const.h>
2647 +
2648 /* bytes per L1 cache line */
2649 #define L1_CACHE_SHIFT 4
2650 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2651 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2652
2653 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
2654
2655 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2656 index c209c47..2ba96e2 100644
2657 --- a/arch/microblaze/include/asm/cache.h
2658 +++ b/arch/microblaze/include/asm/cache.h
2659 @@ -13,11 +13,12 @@
2660 #ifndef _ASM_MICROBLAZE_CACHE_H
2661 #define _ASM_MICROBLAZE_CACHE_H
2662
2663 +#include <linux/const.h>
2664 #include <asm/registers.h>
2665
2666 #define L1_CACHE_SHIFT 2
2667 /* word-granular cache in microblaze */
2668 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2669 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2670
2671 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2672
2673 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
2674 index fd7620f..63d73a6 100644
2675 --- a/arch/mips/Kconfig
2676 +++ b/arch/mips/Kconfig
2677 @@ -5,6 +5,7 @@ config MIPS
2678 select HAVE_IDE
2679 select HAVE_OPROFILE
2680 select HAVE_ARCH_KGDB
2681 + select GENERIC_ATOMIC64 if !64BIT
2682 # Horrible source of confusion. Die, die, die ...
2683 select EMBEDDED
2684 select RTC_LIB if !LEMOTE_FULOONG2E
2685 diff --git a/arch/mips/Makefile b/arch/mips/Makefile
2686 index 77f5021..2b1db8a 100644
2687 --- a/arch/mips/Makefile
2688 +++ b/arch/mips/Makefile
2689 @@ -51,6 +51,8 @@ endif
2690 cflags-y := -ffunction-sections
2691 cflags-y += $(call cc-option, -mno-check-zero-division)
2692
2693 +cflags-y += -Wno-sign-compare -Wno-extra
2694 +
2695 ifdef CONFIG_32BIT
2696 ld-emul = $(32bit-emul)
2697 vmlinux-32 = vmlinux
2698 diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
2699 index 632f986..fd0378d 100644
2700 --- a/arch/mips/alchemy/devboards/pm.c
2701 +++ b/arch/mips/alchemy/devboards/pm.c
2702 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
2703
2704 }
2705
2706 -static struct platform_suspend_ops db1x_pm_ops = {
2707 +static const struct platform_suspend_ops db1x_pm_ops = {
2708 .valid = suspend_valid_only_mem,
2709 .begin = db1x_pm_begin,
2710 .enter = db1x_pm_enter,
2711 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2712 index 09e7128..111035b 100644
2713 --- a/arch/mips/include/asm/atomic.h
2714 +++ b/arch/mips/include/asm/atomic.h
2715 @@ -21,6 +21,10 @@
2716 #include <asm/war.h>
2717 #include <asm/system.h>
2718
2719 +#ifdef CONFIG_GENERIC_ATOMIC64
2720 +#include <asm-generic/atomic64.h>
2721 +#endif
2722 +
2723 #define ATOMIC_INIT(i) { (i) }
2724
2725 /*
2726 @@ -782,6 +786,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2727 */
2728 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2729
2730 +#define atomic64_read_unchecked(v) atomic64_read(v)
2731 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2732 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2733 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2734 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2735 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2736 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2737 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2738 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2739 +
2740 #endif /* CONFIG_64BIT */
2741
2742 /*
2743 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2744 index 37f175c..c7a3065 100644
2745 --- a/arch/mips/include/asm/cache.h
2746 +++ b/arch/mips/include/asm/cache.h
2747 @@ -9,10 +9,11 @@
2748 #ifndef _ASM_CACHE_H
2749 #define _ASM_CACHE_H
2750
2751 +#include <linux/const.h>
2752 #include <kmalloc.h>
2753
2754 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2755 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2756 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2757
2758 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2759 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2760 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2761 index 7990694..4e93acf 100644
2762 --- a/arch/mips/include/asm/elf.h
2763 +++ b/arch/mips/include/asm/elf.h
2764 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
2765 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2766 #endif
2767
2768 +#ifdef CONFIG_PAX_ASLR
2769 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2770 +
2771 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2772 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2773 +#endif
2774 +
2775 #endif /* _ASM_ELF_H */
2776 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2777 index f266295..627cfff 100644
2778 --- a/arch/mips/include/asm/page.h
2779 +++ b/arch/mips/include/asm/page.h
2780 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2781 #ifdef CONFIG_CPU_MIPS32
2782 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2783 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2784 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2785 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2786 #else
2787 typedef struct { unsigned long long pte; } pte_t;
2788 #define pte_val(x) ((x).pte)
2789 diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
2790 index e48c0bf..f3acf65 100644
2791 --- a/arch/mips/include/asm/reboot.h
2792 +++ b/arch/mips/include/asm/reboot.h
2793 @@ -9,7 +9,7 @@
2794 #ifndef _ASM_REBOOT_H
2795 #define _ASM_REBOOT_H
2796
2797 -extern void (*_machine_restart)(char *command);
2798 -extern void (*_machine_halt)(void);
2799 +extern void (*__noreturn _machine_restart)(char *command);
2800 +extern void (*__noreturn _machine_halt)(void);
2801
2802 #endif /* _ASM_REBOOT_H */
2803 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2804 index 83b5509..9fa24a23 100644
2805 --- a/arch/mips/include/asm/system.h
2806 +++ b/arch/mips/include/asm/system.h
2807 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2808 */
2809 #define __ARCH_WANT_UNLOCKED_CTXSW
2810
2811 -extern unsigned long arch_align_stack(unsigned long sp);
2812 +#define arch_align_stack(x) ((x) & ~0xfUL)
2813
2814 #endif /* _ASM_SYSTEM_H */
2815 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2816 index 9fdd8bc..fcf9d68 100644
2817 --- a/arch/mips/kernel/binfmt_elfn32.c
2818 +++ b/arch/mips/kernel/binfmt_elfn32.c
2819 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2820 #undef ELF_ET_DYN_BASE
2821 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2822
2823 +#ifdef CONFIG_PAX_ASLR
2824 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2825 +
2826 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2827 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2828 +#endif
2829 +
2830 #include <asm/processor.h>
2831 #include <linux/module.h>
2832 #include <linux/elfcore.h>
2833 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2834 index ff44823..cf0b48a 100644
2835 --- a/arch/mips/kernel/binfmt_elfo32.c
2836 +++ b/arch/mips/kernel/binfmt_elfo32.c
2837 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2838 #undef ELF_ET_DYN_BASE
2839 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2840
2841 +#ifdef CONFIG_PAX_ASLR
2842 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2843 +
2844 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2845 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2846 +#endif
2847 +
2848 #include <asm/processor.h>
2849
2850 /*
2851 diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2852 index 50c9bb8..efdd5f8 100644
2853 --- a/arch/mips/kernel/kgdb.c
2854 +++ b/arch/mips/kernel/kgdb.c
2855 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2856 return -1;
2857 }
2858
2859 +/* cannot be const */
2860 struct kgdb_arch arch_kgdb_ops;
2861
2862 /*
2863 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2864 index f3d73e1..bb3f57a 100644
2865 --- a/arch/mips/kernel/process.c
2866 +++ b/arch/mips/kernel/process.c
2867 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2868 out:
2869 return pc;
2870 }
2871 -
2872 -/*
2873 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2874 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2875 - */
2876 -unsigned long arch_align_stack(unsigned long sp)
2877 -{
2878 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2879 - sp -= get_random_int() & ~PAGE_MASK;
2880 -
2881 - return sp & ALMASK;
2882 -}
2883 diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2884 index 060563a..7fbf310 100644
2885 --- a/arch/mips/kernel/reset.c
2886 +++ b/arch/mips/kernel/reset.c
2887 @@ -19,8 +19,8 @@
2888 * So handle all using function pointers to machine specific
2889 * functions.
2890 */
2891 -void (*_machine_restart)(char *command);
2892 -void (*_machine_halt)(void);
2893 +void (*__noreturn _machine_restart)(char *command);
2894 +void (*__noreturn _machine_halt)(void);
2895 void (*pm_power_off)(void);
2896
2897 EXPORT_SYMBOL(pm_power_off);
2898 @@ -29,16 +29,19 @@ void machine_restart(char *command)
2899 {
2900 if (_machine_restart)
2901 _machine_restart(command);
2902 + BUG();
2903 }
2904
2905 void machine_halt(void)
2906 {
2907 if (_machine_halt)
2908 _machine_halt();
2909 + BUG();
2910 }
2911
2912 void machine_power_off(void)
2913 {
2914 if (pm_power_off)
2915 pm_power_off();
2916 + BUG();
2917 }
2918 diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2919 index 3f7f466..3abe0b5 100644
2920 --- a/arch/mips/kernel/syscall.c
2921 +++ b/arch/mips/kernel/syscall.c
2922 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2923 do_color_align = 0;
2924 if (filp || (flags & MAP_SHARED))
2925 do_color_align = 1;
2926 +
2927 +#ifdef CONFIG_PAX_RANDMMAP
2928 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2929 +#endif
2930 +
2931 if (addr) {
2932 if (do_color_align)
2933 addr = COLOUR_ALIGN(addr, pgoff);
2934 else
2935 addr = PAGE_ALIGN(addr);
2936 vmm = find_vma(current->mm, addr);
2937 - if (task_size - len >= addr &&
2938 - (!vmm || addr + len <= vmm->vm_start))
2939 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2940 return addr;
2941 }
2942 - addr = TASK_UNMAPPED_BASE;
2943 + addr = current->mm->mmap_base;
2944 if (do_color_align)
2945 addr = COLOUR_ALIGN(addr, pgoff);
2946 else
2947 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2948 /* At this point: (!vmm || addr < vmm->vm_end). */
2949 if (task_size - len < addr)
2950 return -ENOMEM;
2951 - if (!vmm || addr + len <= vmm->vm_start)
2952 + if (check_heap_stack_gap(vmm, addr, len))
2953 return addr;
2954 addr = vmm->vm_end;
2955 if (do_color_align)
2956 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2957 index e97a7a2..f18f5b0 100644
2958 --- a/arch/mips/mm/fault.c
2959 +++ b/arch/mips/mm/fault.c
2960 @@ -26,6 +26,23 @@
2961 #include <asm/ptrace.h>
2962 #include <asm/highmem.h> /* For VMALLOC_END */
2963
2964 +#ifdef CONFIG_PAX_PAGEEXEC
2965 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2966 +{
2967 + unsigned long i;
2968 +
2969 + printk(KERN_ERR "PAX: bytes at PC: ");
2970 + for (i = 0; i < 5; i++) {
2971 + unsigned int c;
2972 + if (get_user(c, (unsigned int *)pc+i))
2973 + printk(KERN_CONT "???????? ");
2974 + else
2975 + printk(KERN_CONT "%08x ", c);
2976 + }
2977 + printk("\n");
2978 +}
2979 +#endif
2980 +
2981 /*
2982 * This routine handles page faults. It determines the address,
2983 * and the problem, and then passes it off to one of the appropriate
2984 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2985 index bdc1f9a..e8de5c5 100644
2986 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
2987 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2988 @@ -11,12 +11,14 @@
2989 #ifndef _ASM_PROC_CACHE_H
2990 #define _ASM_PROC_CACHE_H
2991
2992 +#include <linux/const.h>
2993 +
2994 /* L1 cache */
2995
2996 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2997 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
2998 -#define L1_CACHE_BYTES 16 /* bytes per entry */
2999 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3000 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3001 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3002
3003 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3004 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3005 index 8bc9e96..26554f8 100644
3006 --- a/arch/parisc/include/asm/atomic.h
3007 +++ b/arch/parisc/include/asm/atomic.h
3008 @@ -336,6 +336,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3009
3010 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3011
3012 +#define atomic64_read_unchecked(v) atomic64_read(v)
3013 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3014 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3015 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3016 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3017 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3018 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3019 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3020 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3021 +
3022 #else /* CONFIG_64BIT */
3023
3024 #include <asm-generic/atomic64.h>
3025 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3026 index 32c2cca..a7b3a64 100644
3027 --- a/arch/parisc/include/asm/cache.h
3028 +++ b/arch/parisc/include/asm/cache.h
3029 @@ -5,6 +5,7 @@
3030 #ifndef __ARCH_PARISC_CACHE_H
3031 #define __ARCH_PARISC_CACHE_H
3032
3033 +#include <linux/const.h>
3034
3035 /*
3036 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3037 @@ -15,13 +16,13 @@
3038 * just ruin performance.
3039 */
3040 #ifdef CONFIG_PA20
3041 -#define L1_CACHE_BYTES 64
3042 #define L1_CACHE_SHIFT 6
3043 #else
3044 -#define L1_CACHE_BYTES 32
3045 #define L1_CACHE_SHIFT 5
3046 #endif
3047
3048 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3049 +
3050 #ifndef __ASSEMBLY__
3051
3052 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
3053 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3054 index 9c802eb..0592e41 100644
3055 --- a/arch/parisc/include/asm/elf.h
3056 +++ b/arch/parisc/include/asm/elf.h
3057 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
3058
3059 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3060
3061 +#ifdef CONFIG_PAX_ASLR
3062 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3063 +
3064 +#define PAX_DELTA_MMAP_LEN 16
3065 +#define PAX_DELTA_STACK_LEN 16
3066 +#endif
3067 +
3068 /* This yields a mask that user programs can use to figure out what
3069 instruction set this CPU supports. This could be done in user space,
3070 but it's not easy, and we've already done it here. */
3071 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3072 index a27d2e2..18fd845 100644
3073 --- a/arch/parisc/include/asm/pgtable.h
3074 +++ b/arch/parisc/include/asm/pgtable.h
3075 @@ -207,6 +207,17 @@
3076 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3077 #define PAGE_COPY PAGE_EXECREAD
3078 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3079 +
3080 +#ifdef CONFIG_PAX_PAGEEXEC
3081 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3082 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3083 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3084 +#else
3085 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3086 +# define PAGE_COPY_NOEXEC PAGE_COPY
3087 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3088 +#endif
3089 +
3090 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3091 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
3092 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
3093 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3094 index 2120746..8d70a5e 100644
3095 --- a/arch/parisc/kernel/module.c
3096 +++ b/arch/parisc/kernel/module.c
3097 @@ -95,16 +95,38 @@
3098
3099 /* three functions to determine where in the module core
3100 * or init pieces the location is */
3101 +static inline int in_init_rx(struct module *me, void *loc)
3102 +{
3103 + return (loc >= me->module_init_rx &&
3104 + loc < (me->module_init_rx + me->init_size_rx));
3105 +}
3106 +
3107 +static inline int in_init_rw(struct module *me, void *loc)
3108 +{
3109 + return (loc >= me->module_init_rw &&
3110 + loc < (me->module_init_rw + me->init_size_rw));
3111 +}
3112 +
3113 static inline int in_init(struct module *me, void *loc)
3114 {
3115 - return (loc >= me->module_init &&
3116 - loc <= (me->module_init + me->init_size));
3117 + return in_init_rx(me, loc) || in_init_rw(me, loc);
3118 +}
3119 +
3120 +static inline int in_core_rx(struct module *me, void *loc)
3121 +{
3122 + return (loc >= me->module_core_rx &&
3123 + loc < (me->module_core_rx + me->core_size_rx));
3124 +}
3125 +
3126 +static inline int in_core_rw(struct module *me, void *loc)
3127 +{
3128 + return (loc >= me->module_core_rw &&
3129 + loc < (me->module_core_rw + me->core_size_rw));
3130 }
3131
3132 static inline int in_core(struct module *me, void *loc)
3133 {
3134 - return (loc >= me->module_core &&
3135 - loc <= (me->module_core + me->core_size));
3136 + return in_core_rx(me, loc) || in_core_rw(me, loc);
3137 }
3138
3139 static inline int in_local(struct module *me, void *loc)
3140 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3141 }
3142
3143 /* align things a bit */
3144 - me->core_size = ALIGN(me->core_size, 16);
3145 - me->arch.got_offset = me->core_size;
3146 - me->core_size += gots * sizeof(struct got_entry);
3147 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3148 + me->arch.got_offset = me->core_size_rw;
3149 + me->core_size_rw += gots * sizeof(struct got_entry);
3150
3151 - me->core_size = ALIGN(me->core_size, 16);
3152 - me->arch.fdesc_offset = me->core_size;
3153 - me->core_size += fdescs * sizeof(Elf_Fdesc);
3154 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3155 + me->arch.fdesc_offset = me->core_size_rw;
3156 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3157
3158 me->arch.got_max = gots;
3159 me->arch.fdesc_max = fdescs;
3160 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3161
3162 BUG_ON(value == 0);
3163
3164 - got = me->module_core + me->arch.got_offset;
3165 + got = me->module_core_rw + me->arch.got_offset;
3166 for (i = 0; got[i].addr; i++)
3167 if (got[i].addr == value)
3168 goto out;
3169 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3170 #ifdef CONFIG_64BIT
3171 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3172 {
3173 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3174 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3175
3176 if (!value) {
3177 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3178 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3179
3180 /* Create new one */
3181 fdesc->addr = value;
3182 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3183 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3184 return (Elf_Addr)fdesc;
3185 }
3186 #endif /* CONFIG_64BIT */
3187 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
3188
3189 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3190 end = table + sechdrs[me->arch.unwind_section].sh_size;
3191 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3192 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3193
3194 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3195 me->arch.unwind_section, table, end, gp);
3196 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3197 index 9147391..f3d949a 100644
3198 --- a/arch/parisc/kernel/sys_parisc.c
3199 +++ b/arch/parisc/kernel/sys_parisc.c
3200 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3201 /* At this point: (!vma || addr < vma->vm_end). */
3202 if (TASK_SIZE - len < addr)
3203 return -ENOMEM;
3204 - if (!vma || addr + len <= vma->vm_start)
3205 + if (check_heap_stack_gap(vma, addr, len))
3206 return addr;
3207 addr = vma->vm_end;
3208 }
3209 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3210 /* At this point: (!vma || addr < vma->vm_end). */
3211 if (TASK_SIZE - len < addr)
3212 return -ENOMEM;
3213 - if (!vma || addr + len <= vma->vm_start)
3214 + if (check_heap_stack_gap(vma, addr, len))
3215 return addr;
3216 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3217 if (addr < vma->vm_end) /* handle wraparound */
3218 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3219 if (flags & MAP_FIXED)
3220 return addr;
3221 if (!addr)
3222 - addr = TASK_UNMAPPED_BASE;
3223 + addr = current->mm->mmap_base;
3224
3225 if (filp) {
3226 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3227 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3228 index 8b58bf0..7afff03 100644
3229 --- a/arch/parisc/kernel/traps.c
3230 +++ b/arch/parisc/kernel/traps.c
3231 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3232
3233 down_read(&current->mm->mmap_sem);
3234 vma = find_vma(current->mm,regs->iaoq[0]);
3235 - if (vma && (regs->iaoq[0] >= vma->vm_start)
3236 - && (vma->vm_flags & VM_EXEC)) {
3237 -
3238 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3239 fault_address = regs->iaoq[0];
3240 fault_space = regs->iasq[0];
3241
3242 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3243 index c6afbfc..c5839f6 100644
3244 --- a/arch/parisc/mm/fault.c
3245 +++ b/arch/parisc/mm/fault.c
3246 @@ -15,6 +15,7 @@
3247 #include <linux/sched.h>
3248 #include <linux/interrupt.h>
3249 #include <linux/module.h>
3250 +#include <linux/unistd.h>
3251
3252 #include <asm/uaccess.h>
3253 #include <asm/traps.h>
3254 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3255 static unsigned long
3256 parisc_acctyp(unsigned long code, unsigned int inst)
3257 {
3258 - if (code == 6 || code == 16)
3259 + if (code == 6 || code == 7 || code == 16)
3260 return VM_EXEC;
3261
3262 switch (inst & 0xf0000000) {
3263 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3264 }
3265 #endif
3266
3267 +#ifdef CONFIG_PAX_PAGEEXEC
3268 +/*
3269 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3270 + *
3271 + * returns 1 when task should be killed
3272 + * 2 when rt_sigreturn trampoline was detected
3273 + * 3 when unpatched PLT trampoline was detected
3274 + */
3275 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3276 +{
3277 +
3278 +#ifdef CONFIG_PAX_EMUPLT
3279 + int err;
3280 +
3281 + do { /* PaX: unpatched PLT emulation */
3282 + unsigned int bl, depwi;
3283 +
3284 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3285 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3286 +
3287 + if (err)
3288 + break;
3289 +
3290 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3291 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3292 +
3293 + err = get_user(ldw, (unsigned int *)addr);
3294 + err |= get_user(bv, (unsigned int *)(addr+4));
3295 + err |= get_user(ldw2, (unsigned int *)(addr+8));
3296 +
3297 + if (err)
3298 + break;
3299 +
3300 + if (ldw == 0x0E801096U &&
3301 + bv == 0xEAC0C000U &&
3302 + ldw2 == 0x0E881095U)
3303 + {
3304 + unsigned int resolver, map;
3305 +
3306 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3307 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3308 + if (err)
3309 + break;
3310 +
3311 + regs->gr[20] = instruction_pointer(regs)+8;
3312 + regs->gr[21] = map;
3313 + regs->gr[22] = resolver;
3314 + regs->iaoq[0] = resolver | 3UL;
3315 + regs->iaoq[1] = regs->iaoq[0] + 4;
3316 + return 3;
3317 + }
3318 + }
3319 + } while (0);
3320 +#endif
3321 +
3322 +#ifdef CONFIG_PAX_EMUTRAMP
3323 +
3324 +#ifndef CONFIG_PAX_EMUSIGRT
3325 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3326 + return 1;
3327 +#endif
3328 +
3329 + do { /* PaX: rt_sigreturn emulation */
3330 + unsigned int ldi1, ldi2, bel, nop;
3331 +
3332 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3333 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3334 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3335 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3336 +
3337 + if (err)
3338 + break;
3339 +
3340 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3341 + ldi2 == 0x3414015AU &&
3342 + bel == 0xE4008200U &&
3343 + nop == 0x08000240U)
3344 + {
3345 + regs->gr[25] = (ldi1 & 2) >> 1;
3346 + regs->gr[20] = __NR_rt_sigreturn;
3347 + regs->gr[31] = regs->iaoq[1] + 16;
3348 + regs->sr[0] = regs->iasq[1];
3349 + regs->iaoq[0] = 0x100UL;
3350 + regs->iaoq[1] = regs->iaoq[0] + 4;
3351 + regs->iasq[0] = regs->sr[2];
3352 + regs->iasq[1] = regs->sr[2];
3353 + return 2;
3354 + }
3355 + } while (0);
3356 +#endif
3357 +
3358 + return 1;
3359 +}
3360 +
3361 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3362 +{
3363 + unsigned long i;
3364 +
3365 + printk(KERN_ERR "PAX: bytes at PC: ");
3366 + for (i = 0; i < 5; i++) {
3367 + unsigned int c;
3368 + if (get_user(c, (unsigned int *)pc+i))
3369 + printk(KERN_CONT "???????? ");
3370 + else
3371 + printk(KERN_CONT "%08x ", c);
3372 + }
3373 + printk("\n");
3374 +}
3375 +#endif
3376 +
3377 int fixup_exception(struct pt_regs *regs)
3378 {
3379 const struct exception_table_entry *fix;
3380 @@ -192,8 +303,33 @@ good_area:
3381
3382 acc_type = parisc_acctyp(code,regs->iir);
3383
3384 - if ((vma->vm_flags & acc_type) != acc_type)
3385 + if ((vma->vm_flags & acc_type) != acc_type) {
3386 +
3387 +#ifdef CONFIG_PAX_PAGEEXEC
3388 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3389 + (address & ~3UL) == instruction_pointer(regs))
3390 + {
3391 + up_read(&mm->mmap_sem);
3392 + switch (pax_handle_fetch_fault(regs)) {
3393 +
3394 +#ifdef CONFIG_PAX_EMUPLT
3395 + case 3:
3396 + return;
3397 +#endif
3398 +
3399 +#ifdef CONFIG_PAX_EMUTRAMP
3400 + case 2:
3401 + return;
3402 +#endif
3403 +
3404 + }
3405 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3406 + do_group_exit(SIGKILL);
3407 + }
3408 +#endif
3409 +
3410 goto bad_area;
3411 + }
3412
3413 /*
3414 * If for any reason at all we couldn't handle the fault, make
3415 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
3416 index c107b74..409dc0f 100644
3417 --- a/arch/powerpc/Makefile
3418 +++ b/arch/powerpc/Makefile
3419 @@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
3420 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
3421 CPP = $(CC) -E $(KBUILD_CFLAGS)
3422
3423 +cflags-y += -Wno-sign-compare -Wno-extra
3424 +
3425 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
3426
3427 ifeq ($(CONFIG_PPC64),y)
3428 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3429 index 81de6eb..d5d0e24 100644
3430 --- a/arch/powerpc/include/asm/cache.h
3431 +++ b/arch/powerpc/include/asm/cache.h
3432 @@ -3,6 +3,7 @@
3433
3434 #ifdef __KERNEL__
3435
3436 +#include <linux/const.h>
3437
3438 /* bytes per L1 cache line */
3439 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3440 @@ -18,7 +19,7 @@
3441 #define L1_CACHE_SHIFT 7
3442 #endif
3443
3444 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3445 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3446
3447 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3448
3449 diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
3450 index 6d94d27..50d4cad 100644
3451 --- a/arch/powerpc/include/asm/device.h
3452 +++ b/arch/powerpc/include/asm/device.h
3453 @@ -14,7 +14,7 @@ struct dev_archdata {
3454 struct device_node *of_node;
3455
3456 /* DMA operations on that device */
3457 - struct dma_map_ops *dma_ops;
3458 + const struct dma_map_ops *dma_ops;
3459
3460 /*
3461 * When an iommu is in use, dma_data is used as a ptr to the base of the
3462 diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
3463 index e281dae..2b8a784 100644
3464 --- a/arch/powerpc/include/asm/dma-mapping.h
3465 +++ b/arch/powerpc/include/asm/dma-mapping.h
3466 @@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
3467 #ifdef CONFIG_PPC64
3468 extern struct dma_map_ops dma_iommu_ops;
3469 #endif
3470 -extern struct dma_map_ops dma_direct_ops;
3471 +extern const struct dma_map_ops dma_direct_ops;
3472
3473 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3474 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3475 {
3476 /* We don't handle the NULL dev case for ISA for now. We could
3477 * do it via an out of line call but it is not needed for now. The
3478 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3479 return dev->archdata.dma_ops;
3480 }
3481
3482 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
3483 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
3484 {
3485 dev->archdata.dma_ops = ops;
3486 }
3487 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
3488
3489 static inline int dma_supported(struct device *dev, u64 mask)
3490 {
3491 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
3492 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3493
3494 if (unlikely(dma_ops == NULL))
3495 return 0;
3496 @@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
3497
3498 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
3499 {
3500 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
3501 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3502
3503 if (unlikely(dma_ops == NULL))
3504 return -EIO;
3505 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
3506 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3507 dma_addr_t *dma_handle, gfp_t flag)
3508 {
3509 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
3510 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3511 void *cpu_addr;
3512
3513 BUG_ON(!dma_ops);
3514 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3515 static inline void dma_free_coherent(struct device *dev, size_t size,
3516 void *cpu_addr, dma_addr_t dma_handle)
3517 {
3518 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
3519 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3520
3521 BUG_ON(!dma_ops);
3522
3523 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
3524
3525 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
3526 {
3527 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
3528 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3529
3530 if (dma_ops->mapping_error)
3531 return dma_ops->mapping_error(dev, dma_addr);
3532 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3533 index 5698502..5db093c 100644
3534 --- a/arch/powerpc/include/asm/elf.h
3535 +++ b/arch/powerpc/include/asm/elf.h
3536 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3537 the loader. We need to make sure that it is out of the way of the program
3538 that it will "exec", and that there is sufficient room for the brk. */
3539
3540 -extern unsigned long randomize_et_dyn(unsigned long base);
3541 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3542 +#define ELF_ET_DYN_BASE (0x20000000)
3543 +
3544 +#ifdef CONFIG_PAX_ASLR
3545 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3546 +
3547 +#ifdef __powerpc64__
3548 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
3549 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
3550 +#else
3551 +#define PAX_DELTA_MMAP_LEN 15
3552 +#define PAX_DELTA_STACK_LEN 15
3553 +#endif
3554 +#endif
3555
3556 /*
3557 * Our registers are always unsigned longs, whether we're a 32 bit
3558 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3559 (0x7ff >> (PAGE_SHIFT - 12)) : \
3560 (0x3ffff >> (PAGE_SHIFT - 12)))
3561
3562 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3563 -#define arch_randomize_brk arch_randomize_brk
3564 -
3565 #endif /* __KERNEL__ */
3566
3567 /*
3568 diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
3569 index edfc980..1766f59 100644
3570 --- a/arch/powerpc/include/asm/iommu.h
3571 +++ b/arch/powerpc/include/asm/iommu.h
3572 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
3573 extern void iommu_init_early_dart(void);
3574 extern void iommu_init_early_pasemi(void);
3575
3576 +/* dma-iommu.c */
3577 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
3578 +
3579 #ifdef CONFIG_PCI
3580 extern void pci_iommu_init(void);
3581 extern void pci_direct_iommu_init(void);
3582 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3583 index 9163695..5a00112 100644
3584 --- a/arch/powerpc/include/asm/kmap_types.h
3585 +++ b/arch/powerpc/include/asm/kmap_types.h
3586 @@ -26,6 +26,7 @@ enum km_type {
3587 KM_SOFTIRQ1,
3588 KM_PPC_SYNC_PAGE,
3589 KM_PPC_SYNC_ICACHE,
3590 + KM_CLEARPAGE,
3591 KM_TYPE_NR
3592 };
3593
3594 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3595 index ff24254..fe45b21 100644
3596 --- a/arch/powerpc/include/asm/page.h
3597 +++ b/arch/powerpc/include/asm/page.h
3598 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
3599 * and needs to be executable. This means the whole heap ends
3600 * up being executable.
3601 */
3602 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3603 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3604 +#define VM_DATA_DEFAULT_FLAGS32 \
3605 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3606 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3607
3608 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3609 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3610 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
3611 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3612 #endif
3613
3614 +#define ktla_ktva(addr) (addr)
3615 +#define ktva_ktla(addr) (addr)
3616 +
3617 #ifndef __ASSEMBLY__
3618
3619 #undef STRICT_MM_TYPECHECKS
3620 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3621 index 3f17b83..1f9e766 100644
3622 --- a/arch/powerpc/include/asm/page_64.h
3623 +++ b/arch/powerpc/include/asm/page_64.h
3624 @@ -180,15 +180,18 @@ do { \
3625 * stack by default, so in the absense of a PT_GNU_STACK program header
3626 * we turn execute permission off.
3627 */
3628 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3629 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3630 +#define VM_STACK_DEFAULT_FLAGS32 \
3631 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3632 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3633
3634 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3635 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3636
3637 +#ifndef CONFIG_PAX_PAGEEXEC
3638 #define VM_STACK_DEFAULT_FLAGS \
3639 (test_thread_flag(TIF_32BIT) ? \
3640 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3641 +#endif
3642
3643 #include <asm-generic/getorder.h>
3644
3645 diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
3646 index b5ea626..40308222 100644
3647 --- a/arch/powerpc/include/asm/pci.h
3648 +++ b/arch/powerpc/include/asm/pci.h
3649 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
3650 }
3651
3652 #ifdef CONFIG_PCI
3653 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
3654 -extern struct dma_map_ops *get_pci_dma_ops(void);
3655 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
3656 +extern const struct dma_map_ops *get_pci_dma_ops(void);
3657 #else /* CONFIG_PCI */
3658 #define set_pci_dma_ops(d)
3659 #define get_pci_dma_ops() NULL
3660 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3661 index 2a5da06..d65bea2 100644
3662 --- a/arch/powerpc/include/asm/pgtable.h
3663 +++ b/arch/powerpc/include/asm/pgtable.h
3664 @@ -2,6 +2,7 @@
3665 #define _ASM_POWERPC_PGTABLE_H
3666 #ifdef __KERNEL__
3667
3668 +#include <linux/const.h>
3669 #ifndef __ASSEMBLY__
3670 #include <asm/processor.h> /* For TASK_SIZE */
3671 #include <asm/mmu.h>
3672 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3673 index 4aad413..85d86bf 100644
3674 --- a/arch/powerpc/include/asm/pte-hash32.h
3675 +++ b/arch/powerpc/include/asm/pte-hash32.h
3676 @@ -21,6 +21,7 @@
3677 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3678 #define _PAGE_USER 0x004 /* usermode access allowed */
3679 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3680 +#define _PAGE_EXEC _PAGE_GUARDED
3681 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3682 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3683 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3684 diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
3685 index 8c34149..78f425a 100644
3686 --- a/arch/powerpc/include/asm/ptrace.h
3687 +++ b/arch/powerpc/include/asm/ptrace.h
3688 @@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
3689 } while(0)
3690
3691 struct task_struct;
3692 -extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
3693 +extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
3694 extern int ptrace_put_reg(struct task_struct *task, int regno,
3695 unsigned long data);
3696
3697 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3698 index 32a7c30..be3a8bb 100644
3699 --- a/arch/powerpc/include/asm/reg.h
3700 +++ b/arch/powerpc/include/asm/reg.h
3701 @@ -191,6 +191,7 @@
3702 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3703 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3704 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3705 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3706 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3707 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3708 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3709 diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
3710 index 8979d4c..d2fd0d3 100644
3711 --- a/arch/powerpc/include/asm/swiotlb.h
3712 +++ b/arch/powerpc/include/asm/swiotlb.h
3713 @@ -13,7 +13,7 @@
3714
3715 #include <linux/swiotlb.h>
3716
3717 -extern struct dma_map_ops swiotlb_dma_ops;
3718 +extern const struct dma_map_ops swiotlb_dma_ops;
3719
3720 static inline void dma_mark_clean(void *addr, size_t size) {}
3721
3722 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
3723 index 094a12a..877a60a 100644
3724 --- a/arch/powerpc/include/asm/system.h
3725 +++ b/arch/powerpc/include/asm/system.h
3726 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3727 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3728 #endif
3729
3730 -extern unsigned long arch_align_stack(unsigned long sp);
3731 +#define arch_align_stack(x) ((x) & ~0xfUL)
3732
3733 /* Used in very early kernel initialization. */
3734 extern unsigned long reloc_offset(void);
3735 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3736 index bd0fb84..a42a14b 100644
3737 --- a/arch/powerpc/include/asm/uaccess.h
3738 +++ b/arch/powerpc/include/asm/uaccess.h
3739 @@ -13,6 +13,8 @@
3740 #define VERIFY_READ 0
3741 #define VERIFY_WRITE 1
3742
3743 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3744 +
3745 /*
3746 * The fs value determines whether argument validity checking should be
3747 * performed or not. If get_fs() == USER_DS, checking is performed, with
3748 @@ -327,52 +329,6 @@ do { \
3749 extern unsigned long __copy_tofrom_user(void __user *to,
3750 const void __user *from, unsigned long size);
3751
3752 -#ifndef __powerpc64__
3753 -
3754 -static inline unsigned long copy_from_user(void *to,
3755 - const void __user *from, unsigned long n)
3756 -{
3757 - unsigned long over;
3758 -
3759 - if (access_ok(VERIFY_READ, from, n))
3760 - return __copy_tofrom_user((__force void __user *)to, from, n);
3761 - if ((unsigned long)from < TASK_SIZE) {
3762 - over = (unsigned long)from + n - TASK_SIZE;
3763 - return __copy_tofrom_user((__force void __user *)to, from,
3764 - n - over) + over;
3765 - }
3766 - return n;
3767 -}
3768 -
3769 -static inline unsigned long copy_to_user(void __user *to,
3770 - const void *from, unsigned long n)
3771 -{
3772 - unsigned long over;
3773 -
3774 - if (access_ok(VERIFY_WRITE, to, n))
3775 - return __copy_tofrom_user(to, (__force void __user *)from, n);
3776 - if ((unsigned long)to < TASK_SIZE) {
3777 - over = (unsigned long)to + n - TASK_SIZE;
3778 - return __copy_tofrom_user(to, (__force void __user *)from,
3779 - n - over) + over;
3780 - }
3781 - return n;
3782 -}
3783 -
3784 -#else /* __powerpc64__ */
3785 -
3786 -#define __copy_in_user(to, from, size) \
3787 - __copy_tofrom_user((to), (from), (size))
3788 -
3789 -extern unsigned long copy_from_user(void *to, const void __user *from,
3790 - unsigned long n);
3791 -extern unsigned long copy_to_user(void __user *to, const void *from,
3792 - unsigned long n);
3793 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
3794 - unsigned long n);
3795 -
3796 -#endif /* __powerpc64__ */
3797 -
3798 static inline unsigned long __copy_from_user_inatomic(void *to,
3799 const void __user *from, unsigned long n)
3800 {
3801 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3802 if (ret == 0)
3803 return 0;
3804 }
3805 +
3806 + if (!__builtin_constant_p(n))
3807 + check_object_size(to, n, false);
3808 +
3809 return __copy_tofrom_user((__force void __user *)to, from, n);
3810 }
3811
3812 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3813 if (ret == 0)
3814 return 0;
3815 }
3816 +
3817 + if (!__builtin_constant_p(n))
3818 + check_object_size(from, n, true);
3819 +
3820 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3821 }
3822
3823 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3824 return __copy_to_user_inatomic(to, from, size);
3825 }
3826
3827 +#ifndef __powerpc64__
3828 +
3829 +static inline unsigned long __must_check copy_from_user(void *to,
3830 + const void __user *from, unsigned long n)
3831 +{
3832 + unsigned long over;
3833 +
3834 + if ((long)n < 0)
3835 + return n;
3836 +
3837 + if (access_ok(VERIFY_READ, from, n)) {
3838 + if (!__builtin_constant_p(n))
3839 + check_object_size(to, n, false);
3840 + return __copy_tofrom_user((__force void __user *)to, from, n);
3841 + }
3842 + if ((unsigned long)from < TASK_SIZE) {
3843 + over = (unsigned long)from + n - TASK_SIZE;
3844 + if (!__builtin_constant_p(n - over))
3845 + check_object_size(to, n - over, false);
3846 + return __copy_tofrom_user((__force void __user *)to, from,
3847 + n - over) + over;
3848 + }
3849 + return n;
3850 +}
3851 +
3852 +static inline unsigned long __must_check copy_to_user(void __user *to,
3853 + const void *from, unsigned long n)
3854 +{
3855 + unsigned long over;
3856 +
3857 + if ((long)n < 0)
3858 + return n;
3859 +
3860 + if (access_ok(VERIFY_WRITE, to, n)) {
3861 + if (!__builtin_constant_p(n))
3862 + check_object_size(from, n, true);
3863 + return __copy_tofrom_user(to, (__force void __user *)from, n);
3864 + }
3865 + if ((unsigned long)to < TASK_SIZE) {
3866 + over = (unsigned long)to + n - TASK_SIZE;
3867 + if (!__builtin_constant_p(n))
3868 + check_object_size(from, n - over, true);
3869 + return __copy_tofrom_user(to, (__force void __user *)from,
3870 + n - over) + over;
3871 + }
3872 + return n;
3873 +}
3874 +
3875 +#else /* __powerpc64__ */
3876 +
3877 +#define __copy_in_user(to, from, size) \
3878 + __copy_tofrom_user((to), (from), (size))
3879 +
3880 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3881 +{
3882 + if ((long)n < 0 || n > INT_MAX)
3883 + return n;
3884 +
3885 + if (!__builtin_constant_p(n))
3886 + check_object_size(to, n, false);
3887 +
3888 + if (likely(access_ok(VERIFY_READ, from, n)))
3889 + n = __copy_from_user(to, from, n);
3890 + else
3891 + memset(to, 0, n);
3892 + return n;
3893 +}
3894 +
3895 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3896 +{
3897 + if ((long)n < 0 || n > INT_MAX)
3898 + return n;
3899 +
3900 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
3901 + if (!__builtin_constant_p(n))
3902 + check_object_size(from, n, true);
3903 + n = __copy_to_user(to, from, n);
3904 + }
3905 + return n;
3906 +}
3907 +
3908 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
3909 + unsigned long n);
3910 +
3911 +#endif /* __powerpc64__ */
3912 +
3913 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3914
3915 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3916 diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3917 index bb37b1d..01fe9ce 100644
3918 --- a/arch/powerpc/kernel/cacheinfo.c
3919 +++ b/arch/powerpc/kernel/cacheinfo.c
3920 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3921 &cache_assoc_attr,
3922 };
3923
3924 -static struct sysfs_ops cache_index_ops = {
3925 +static const struct sysfs_ops cache_index_ops = {
3926 .show = cache_index_show,
3927 };
3928
3929 diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3930 index 37771a5..648530c 100644
3931 --- a/arch/powerpc/kernel/dma-iommu.c
3932 +++ b/arch/powerpc/kernel/dma-iommu.c
3933 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3934 }
3935
3936 /* We support DMA to/from any memory page via the iommu */
3937 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3938 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
3939 {
3940 struct iommu_table *tbl = get_iommu_table_base(dev);
3941
3942 diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3943 index e96cbbd..bdd6d41 100644
3944 --- a/arch/powerpc/kernel/dma-swiotlb.c
3945 +++ b/arch/powerpc/kernel/dma-swiotlb.c
3946 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3947 * map_page, and unmap_page on highmem, use normal dma_ops
3948 * for everything else.
3949 */
3950 -struct dma_map_ops swiotlb_dma_ops = {
3951 +const struct dma_map_ops swiotlb_dma_ops = {
3952 .alloc_coherent = dma_direct_alloc_coherent,
3953 .free_coherent = dma_direct_free_coherent,
3954 .map_sg = swiotlb_map_sg_attrs,
3955 diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3956 index 6215062..ebea59c 100644
3957 --- a/arch/powerpc/kernel/dma.c
3958 +++ b/arch/powerpc/kernel/dma.c
3959 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3960 }
3961 #endif
3962
3963 -struct dma_map_ops dma_direct_ops = {
3964 +const struct dma_map_ops dma_direct_ops = {
3965 .alloc_coherent = dma_direct_alloc_coherent,
3966 .free_coherent = dma_direct_free_coherent,
3967 .map_sg = dma_direct_map_sg,
3968 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3969 index 24dcc0e..a300455 100644
3970 --- a/arch/powerpc/kernel/exceptions-64e.S
3971 +++ b/arch/powerpc/kernel/exceptions-64e.S
3972 @@ -455,6 +455,7 @@ storage_fault_common:
3973 std r14,_DAR(r1)
3974 std r15,_DSISR(r1)
3975 addi r3,r1,STACK_FRAME_OVERHEAD
3976 + bl .save_nvgprs
3977 mr r4,r14
3978 mr r5,r15
3979 ld r14,PACA_EXGEN+EX_R14(r13)
3980 @@ -464,8 +465,7 @@ storage_fault_common:
3981 cmpdi r3,0
3982 bne- 1f
3983 b .ret_from_except_lite
3984 -1: bl .save_nvgprs
3985 - mr r5,r3
3986 +1: mr r5,r3
3987 addi r3,r1,STACK_FRAME_OVERHEAD
3988 ld r4,_DAR(r1)
3989 bl .bad_page_fault
3990 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3991 index 1808876..9fd206a 100644
3992 --- a/arch/powerpc/kernel/exceptions-64s.S
3993 +++ b/arch/powerpc/kernel/exceptions-64s.S
3994 @@ -818,10 +818,10 @@ handle_page_fault:
3995 11: ld r4,_DAR(r1)
3996 ld r5,_DSISR(r1)
3997 addi r3,r1,STACK_FRAME_OVERHEAD
3998 + bl .save_nvgprs
3999 bl .do_page_fault
4000 cmpdi r3,0
4001 beq+ 13f
4002 - bl .save_nvgprs
4003 mr r5,r3
4004 addi r3,r1,STACK_FRAME_OVERHEAD
4005 lwz r4,_DAR(r1)
4006 diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
4007 index a4c8b38..1b09ad9 100644
4008 --- a/arch/powerpc/kernel/ibmebus.c
4009 +++ b/arch/powerpc/kernel/ibmebus.c
4010 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
4011 return 1;
4012 }
4013
4014 -static struct dma_map_ops ibmebus_dma_ops = {
4015 +static const struct dma_map_ops ibmebus_dma_ops = {
4016 .alloc_coherent = ibmebus_alloc_coherent,
4017 .free_coherent = ibmebus_free_coherent,
4018 .map_sg = ibmebus_map_sg,
4019 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
4020 index 8564a41..67f3471 100644
4021 --- a/arch/powerpc/kernel/irq.c
4022 +++ b/arch/powerpc/kernel/irq.c
4023 @@ -490,9 +490,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
4024 host->ops = ops;
4025 host->of_node = of_node_get(of_node);
4026
4027 - if (host->ops->match == NULL)
4028 - host->ops->match = default_irq_host_match;
4029 -
4030 spin_lock_irqsave(&irq_big_lock, flags);
4031
4032 /* If it's a legacy controller, check for duplicates and
4033 @@ -567,7 +564,12 @@ struct irq_host *irq_find_host(struct device_node *node)
4034 */
4035 spin_lock_irqsave(&irq_big_lock, flags);
4036 list_for_each_entry(h, &irq_hosts, link)
4037 - if (h->ops->match(h, node)) {
4038 + if (h->ops->match) {
4039 + if (h->ops->match(h, node)) {
4040 + found = h;
4041 + break;
4042 + }
4043 + } else if (default_irq_host_match(h, node)) {
4044 found = h;
4045 break;
4046 }
4047 diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
4048 index 641c74b..8339ad7 100644
4049 --- a/arch/powerpc/kernel/kgdb.c
4050 +++ b/arch/powerpc/kernel/kgdb.c
4051 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
4052 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
4053 return 0;
4054
4055 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
4056 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
4057 regs->nip += 4;
4058
4059 return 1;
4060 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
4061 /*
4062 * Global data
4063 */
4064 -struct kgdb_arch arch_kgdb_ops = {
4065 +const struct kgdb_arch arch_kgdb_ops = {
4066 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
4067 };
4068
4069 diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
4070 index 477c663..4f50234 100644
4071 --- a/arch/powerpc/kernel/module.c
4072 +++ b/arch/powerpc/kernel/module.c
4073 @@ -31,11 +31,24 @@
4074
4075 LIST_HEAD(module_bug_list);
4076
4077 +#ifdef CONFIG_PAX_KERNEXEC
4078 void *module_alloc(unsigned long size)
4079 {
4080 if (size == 0)
4081 return NULL;
4082
4083 + return vmalloc(size);
4084 +}
4085 +
4086 +void *module_alloc_exec(unsigned long size)
4087 +#else
4088 +void *module_alloc(unsigned long size)
4089 +#endif
4090 +
4091 +{
4092 + if (size == 0)
4093 + return NULL;
4094 +
4095 return vmalloc_exec(size);
4096 }
4097
4098 @@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
4099 vfree(module_region);
4100 }
4101
4102 +#ifdef CONFIG_PAX_KERNEXEC
4103 +void module_free_exec(struct module *mod, void *module_region)
4104 +{
4105 + module_free(mod, module_region);
4106 +}
4107 +#endif
4108 +
4109 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
4110 const Elf_Shdr *sechdrs,
4111 const char *name)
4112 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4113 index f832773..0507238 100644
4114 --- a/arch/powerpc/kernel/module_32.c
4115 +++ b/arch/powerpc/kernel/module_32.c
4116 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4117 me->arch.core_plt_section = i;
4118 }
4119 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4120 - printk("Module doesn't contain .plt or .init.plt sections.\n");
4121 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4122 return -ENOEXEC;
4123 }
4124
4125 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
4126
4127 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4128 /* Init, or core PLT? */
4129 - if (location >= mod->module_core
4130 - && location < mod->module_core + mod->core_size)
4131 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4132 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4133 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4134 - else
4135 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4136 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4137 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4138 + else {
4139 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4140 + return ~0UL;
4141 + }
4142
4143 /* Find this entry, or if that fails, the next avail. entry */
4144 while (entry->jump[0]) {
4145 diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
4146 index cadbed6..b9bbb00 100644
4147 --- a/arch/powerpc/kernel/pci-common.c
4148 +++ b/arch/powerpc/kernel/pci-common.c
4149 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
4150 unsigned int ppc_pci_flags = 0;
4151
4152
4153 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
4154 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
4155
4156 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
4157 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
4158 {
4159 pci_dma_ops = dma_ops;
4160 }
4161
4162 -struct dma_map_ops *get_pci_dma_ops(void)
4163 +const struct dma_map_ops *get_pci_dma_ops(void)
4164 {
4165 return pci_dma_ops;
4166 }
4167 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4168 index 7b816da..8d5c277 100644
4169 --- a/arch/powerpc/kernel/process.c
4170 +++ b/arch/powerpc/kernel/process.c
4171 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
4172 * Lookup NIP late so we have the best change of getting the
4173 * above info out without failing
4174 */
4175 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4176 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4177 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4178 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4179 #endif
4180 show_stack(current, (unsigned long *) regs->gpr[1]);
4181 if (!user_mode(regs))
4182 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4183 newsp = stack[0];
4184 ip = stack[STACK_FRAME_LR_SAVE];
4185 if (!firstframe || ip != lr) {
4186 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4187 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4188 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4189 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4190 - printk(" (%pS)",
4191 + printk(" (%pA)",
4192 (void *)current->ret_stack[curr_frame].ret);
4193 curr_frame--;
4194 }
4195 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4196 struct pt_regs *regs = (struct pt_regs *)
4197 (sp + STACK_FRAME_OVERHEAD);
4198 lr = regs->link;
4199 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
4200 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
4201 regs->trap, (void *)regs->nip, (void *)lr);
4202 firstframe = 1;
4203 }
4204 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
4205 }
4206
4207 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4208 -
4209 -unsigned long arch_align_stack(unsigned long sp)
4210 -{
4211 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4212 - sp -= get_random_int() & ~PAGE_MASK;
4213 - return sp & ~0xf;
4214 -}
4215 -
4216 -static inline unsigned long brk_rnd(void)
4217 -{
4218 - unsigned long rnd = 0;
4219 -
4220 - /* 8MB for 32bit, 1GB for 64bit */
4221 - if (is_32bit_task())
4222 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4223 - else
4224 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4225 -
4226 - return rnd << PAGE_SHIFT;
4227 -}
4228 -
4229 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4230 -{
4231 - unsigned long base = mm->brk;
4232 - unsigned long ret;
4233 -
4234 -#ifdef CONFIG_PPC_STD_MMU_64
4235 - /*
4236 - * If we are using 1TB segments and we are allowed to randomise
4237 - * the heap, we can put it above 1TB so it is backed by a 1TB
4238 - * segment. Otherwise the heap will be in the bottom 1TB
4239 - * which always uses 256MB segments and this may result in a
4240 - * performance penalty.
4241 - */
4242 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4243 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4244 -#endif
4245 -
4246 - ret = PAGE_ALIGN(base + brk_rnd());
4247 -
4248 - if (ret < mm->brk)
4249 - return mm->brk;
4250 -
4251 - return ret;
4252 -}
4253 -
4254 -unsigned long randomize_et_dyn(unsigned long base)
4255 -{
4256 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4257 -
4258 - if (ret < base)
4259 - return base;
4260 -
4261 - return ret;
4262 -}
4263 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4264 index ef14988..856c4bc 100644
4265 --- a/arch/powerpc/kernel/ptrace.c
4266 +++ b/arch/powerpc/kernel/ptrace.c
4267 @@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
4268 /*
4269 * Get contents of register REGNO in task TASK.
4270 */
4271 -unsigned long ptrace_get_reg(struct task_struct *task, int regno)
4272 +unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
4273 {
4274 if (task->thread.regs == NULL)
4275 return -EIO;
4276 @@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
4277
4278 CHECK_FULL_REGS(child->thread.regs);
4279 if (index < PT_FPR0) {
4280 - tmp = ptrace_get_reg(child, (int) index);
4281 + tmp = ptrace_get_reg(child, index);
4282 } else {
4283 flush_fp_to_thread(child);
4284 tmp = ((unsigned long *)child->thread.fpr)
4285 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4286 index d670429..2bc59b2 100644
4287 --- a/arch/powerpc/kernel/signal_32.c
4288 +++ b/arch/powerpc/kernel/signal_32.c
4289 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4290 /* Save user registers on the stack */
4291 frame = &rt_sf->uc.uc_mcontext;
4292 addr = frame;
4293 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4294 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4295 if (save_user_regs(regs, frame, 0, 1))
4296 goto badframe;
4297 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4298 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4299 index 2fe6fc6..ada0d96 100644
4300 --- a/arch/powerpc/kernel/signal_64.c
4301 +++ b/arch/powerpc/kernel/signal_64.c
4302 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4303 current->thread.fpscr.val = 0;
4304
4305 /* Set up to return from userspace. */
4306 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4307 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4308 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4309 } else {
4310 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4311 diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
4312 index b97c2d6..dd01a6a 100644
4313 --- a/arch/powerpc/kernel/sys_ppc32.c
4314 +++ b/arch/powerpc/kernel/sys_ppc32.c
4315 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
4316 if (oldlenp) {
4317 if (!error) {
4318 if (get_user(oldlen, oldlenp) ||
4319 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
4320 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
4321 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
4322 error = -EFAULT;
4323 }
4324 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
4325 }
4326 return error;
4327 }
4328 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4329 index 6f0ae1a..e4b6a56 100644
4330 --- a/arch/powerpc/kernel/traps.c
4331 +++ b/arch/powerpc/kernel/traps.c
4332 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
4333 static inline void pmac_backlight_unblank(void) { }
4334 #endif
4335
4336 +extern void gr_handle_kernel_exploit(void);
4337 +
4338 int die(const char *str, struct pt_regs *regs, long err)
4339 {
4340 static struct {
4341 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
4342 if (panic_on_oops)
4343 panic("Fatal exception");
4344
4345 + gr_handle_kernel_exploit();
4346 +
4347 oops_exit();
4348 do_exit(err);
4349
4350 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4351 index 137dc22..fe57a79 100644
4352 --- a/arch/powerpc/kernel/vdso.c
4353 +++ b/arch/powerpc/kernel/vdso.c
4354 @@ -36,6 +36,7 @@
4355 #include <asm/firmware.h>
4356 #include <asm/vdso.h>
4357 #include <asm/vdso_datapage.h>
4358 +#include <asm/mman.h>
4359
4360 #include "setup.h"
4361
4362 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4363 vdso_base = VDSO32_MBASE;
4364 #endif
4365
4366 - current->mm->context.vdso_base = 0;
4367 + current->mm->context.vdso_base = ~0UL;
4368
4369 /* vDSO has a problem and was disabled, just don't "enable" it for the
4370 * process
4371 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4372 vdso_base = get_unmapped_area(NULL, vdso_base,
4373 (vdso_pages << PAGE_SHIFT) +
4374 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4375 - 0, 0);
4376 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
4377 if (IS_ERR_VALUE(vdso_base)) {
4378 rc = vdso_base;
4379 goto fail_mmapsem;
4380 diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
4381 index 77f6421..829564a 100644
4382 --- a/arch/powerpc/kernel/vio.c
4383 +++ b/arch/powerpc/kernel/vio.c
4384 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
4385 vio_cmo_dealloc(viodev, alloc_size);
4386 }
4387
4388 -struct dma_map_ops vio_dma_mapping_ops = {
4389 +static const struct dma_map_ops vio_dma_mapping_ops = {
4390 .alloc_coherent = vio_dma_iommu_alloc_coherent,
4391 .free_coherent = vio_dma_iommu_free_coherent,
4392 .map_sg = vio_dma_iommu_map_sg,
4393 .unmap_sg = vio_dma_iommu_unmap_sg,
4394 + .dma_supported = dma_iommu_dma_supported,
4395 .map_page = vio_dma_iommu_map_page,
4396 .unmap_page = vio_dma_iommu_unmap_page,
4397
4398 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
4399
4400 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
4401 {
4402 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
4403 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
4404 }
4405
4406 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4407 index 5eea6f3..5d10396 100644
4408 --- a/arch/powerpc/lib/usercopy_64.c
4409 +++ b/arch/powerpc/lib/usercopy_64.c
4410 @@ -9,22 +9,6 @@
4411 #include <linux/module.h>
4412 #include <asm/uaccess.h>
4413
4414 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4415 -{
4416 - if (likely(access_ok(VERIFY_READ, from, n)))
4417 - n = __copy_from_user(to, from, n);
4418 - else
4419 - memset(to, 0, n);
4420 - return n;
4421 -}
4422 -
4423 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4424 -{
4425 - if (likely(access_ok(VERIFY_WRITE, to, n)))
4426 - n = __copy_to_user(to, from, n);
4427 - return n;
4428 -}
4429 -
4430 unsigned long copy_in_user(void __user *to, const void __user *from,
4431 unsigned long n)
4432 {
4433 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4434 return n;
4435 }
4436
4437 -EXPORT_SYMBOL(copy_from_user);
4438 -EXPORT_SYMBOL(copy_to_user);
4439 EXPORT_SYMBOL(copy_in_user);
4440
4441 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4442 index e7dae82..877ce0d 100644
4443 --- a/arch/powerpc/mm/fault.c
4444 +++ b/arch/powerpc/mm/fault.c
4445 @@ -30,6 +30,10 @@
4446 #include <linux/kprobes.h>
4447 #include <linux/kdebug.h>
4448 #include <linux/perf_event.h>
4449 +#include <linux/slab.h>
4450 +#include <linux/pagemap.h>
4451 +#include <linux/compiler.h>
4452 +#include <linux/unistd.h>
4453
4454 #include <asm/firmware.h>
4455 #include <asm/page.h>
4456 @@ -40,6 +44,7 @@
4457 #include <asm/uaccess.h>
4458 #include <asm/tlbflush.h>
4459 #include <asm/siginfo.h>
4460 +#include <asm/ptrace.h>
4461
4462
4463 #ifdef CONFIG_KPROBES
4464 @@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4465 }
4466 #endif
4467
4468 +#ifdef CONFIG_PAX_PAGEEXEC
4469 +/*
4470 + * PaX: decide what to do with offenders (regs->nip = fault address)
4471 + *
4472 + * returns 1 when task should be killed
4473 + */
4474 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4475 +{
4476 + return 1;
4477 +}
4478 +
4479 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4480 +{
4481 + unsigned long i;
4482 +
4483 + printk(KERN_ERR "PAX: bytes at PC: ");
4484 + for (i = 0; i < 5; i++) {
4485 + unsigned int c;
4486 + if (get_user(c, (unsigned int __user *)pc+i))
4487 + printk(KERN_CONT "???????? ");
4488 + else
4489 + printk(KERN_CONT "%08x ", c);
4490 + }
4491 + printk("\n");
4492 +}
4493 +#endif
4494 +
4495 /*
4496 * Check whether the instruction at regs->nip is a store using
4497 * an update addressing form which will update r1.
4498 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4499 * indicate errors in DSISR but can validly be set in SRR1.
4500 */
4501 if (trap == 0x400)
4502 - error_code &= 0x48200000;
4503 + error_code &= 0x58200000;
4504 else
4505 is_write = error_code & DSISR_ISSTORE;
4506 #else
4507 @@ -250,7 +282,7 @@ good_area:
4508 * "undefined". Of those that can be set, this is the only
4509 * one which seems bad.
4510 */
4511 - if (error_code & 0x10000000)
4512 + if (error_code & DSISR_GUARDED)
4513 /* Guarded storage error. */
4514 goto bad_area;
4515 #endif /* CONFIG_8xx */
4516 @@ -265,7 +297,7 @@ good_area:
4517 * processors use the same I/D cache coherency mechanism
4518 * as embedded.
4519 */
4520 - if (error_code & DSISR_PROTFAULT)
4521 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4522 goto bad_area;
4523 #endif /* CONFIG_PPC_STD_MMU */
4524
4525 @@ -335,6 +367,23 @@ bad_area:
4526 bad_area_nosemaphore:
4527 /* User mode accesses cause a SIGSEGV */
4528 if (user_mode(regs)) {
4529 +
4530 +#ifdef CONFIG_PAX_PAGEEXEC
4531 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4532 +#ifdef CONFIG_PPC_STD_MMU
4533 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4534 +#else
4535 + if (is_exec && regs->nip == address) {
4536 +#endif
4537 + switch (pax_handle_fetch_fault(regs)) {
4538 + }
4539 +
4540 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4541 + do_group_exit(SIGKILL);
4542 + }
4543 + }
4544 +#endif
4545 +
4546 _exception(SIGSEGV, regs, code, address);
4547 return 0;
4548 }
4549 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
4550 index 5973631..ad617af 100644
4551 --- a/arch/powerpc/mm/mem.c
4552 +++ b/arch/powerpc/mm/mem.c
4553 @@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
4554 {
4555 unsigned long lmb_next_region_start_pfn,
4556 lmb_region_max_pfn;
4557 - int i;
4558 + unsigned int i;
4559
4560 for (i = 0; i < lmb.memory.cnt - 1; i++) {
4561 lmb_region_max_pfn =
4562 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4563 index 0d957a4..26d968f 100644
4564 --- a/arch/powerpc/mm/mmap_64.c
4565 +++ b/arch/powerpc/mm/mmap_64.c
4566 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4567 */
4568 if (mmap_is_legacy()) {
4569 mm->mmap_base = TASK_UNMAPPED_BASE;
4570 +
4571 +#ifdef CONFIG_PAX_RANDMMAP
4572 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4573 + mm->mmap_base += mm->delta_mmap;
4574 +#endif
4575 +
4576 mm->get_unmapped_area = arch_get_unmapped_area;
4577 mm->unmap_area = arch_unmap_area;
4578 } else {
4579 mm->mmap_base = mmap_base();
4580 +
4581 +#ifdef CONFIG_PAX_RANDMMAP
4582 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4583 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4584 +#endif
4585 +
4586 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4587 mm->unmap_area = arch_unmap_area_topdown;
4588 }
4589 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4590 index ba51948..23009d9 100644
4591 --- a/arch/powerpc/mm/slice.c
4592 +++ b/arch/powerpc/mm/slice.c
4593 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4594 if ((mm->task_size - len) < addr)
4595 return 0;
4596 vma = find_vma(mm, addr);
4597 - return (!vma || (addr + len) <= vma->vm_start);
4598 + return check_heap_stack_gap(vma, addr, len);
4599 }
4600
4601 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4602 @@ -256,7 +256,7 @@ full_search:
4603 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4604 continue;
4605 }
4606 - if (!vma || addr + len <= vma->vm_start) {
4607 + if (check_heap_stack_gap(vma, addr, len)) {
4608 /*
4609 * Remember the place where we stopped the search:
4610 */
4611 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4612 }
4613 }
4614
4615 - addr = mm->mmap_base;
4616 - while (addr > len) {
4617 + if (mm->mmap_base < len)
4618 + addr = -ENOMEM;
4619 + else
4620 + addr = mm->mmap_base - len;
4621 +
4622 + while (!IS_ERR_VALUE(addr)) {
4623 /* Go down by chunk size */
4624 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4625 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
4626
4627 /* Check for hit with different page size */
4628 mask = slice_range_to_mask(addr, len);
4629 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4630 * return with success:
4631 */
4632 vma = find_vma(mm, addr);
4633 - if (!vma || (addr + len) <= vma->vm_start) {
4634 + if (check_heap_stack_gap(vma, addr, len)) {
4635 /* remember the address as a hint for next time */
4636 if (use_cache)
4637 mm->free_area_cache = addr;
4638 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4639 mm->cached_hole_size = vma->vm_start - addr;
4640
4641 /* try just below the current vma->vm_start */
4642 - addr = vma->vm_start;
4643 + addr = skip_heap_stack_gap(vma, len);
4644 }
4645
4646 /*
4647 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4648 if (fixed && addr > (mm->task_size - len))
4649 return -EINVAL;
4650
4651 +#ifdef CONFIG_PAX_RANDMMAP
4652 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4653 + addr = 0;
4654 +#endif
4655 +
4656 /* If hint, make sure it matches our alignment restrictions */
4657 if (!fixed && addr) {
4658 addr = _ALIGN_UP(addr, 1ul << pshift);
4659 diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
4660 index b5c753d..8f01abe 100644
4661 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c
4662 +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
4663 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
4664 lite5200_pm_target_state = PM_SUSPEND_ON;
4665 }
4666
4667 -static struct platform_suspend_ops lite5200_pm_ops = {
4668 +static const struct platform_suspend_ops lite5200_pm_ops = {
4669 .valid = lite5200_pm_valid,
4670 .begin = lite5200_pm_begin,
4671 .prepare = lite5200_pm_prepare,
4672 diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4673 index a55b0b6..478c18e 100644
4674 --- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4675 +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4676 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
4677 iounmap(mbar);
4678 }
4679
4680 -static struct platform_suspend_ops mpc52xx_pm_ops = {
4681 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
4682 .valid = mpc52xx_pm_valid,
4683 .prepare = mpc52xx_pm_prepare,
4684 .enter = mpc52xx_pm_enter,
4685 diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
4686 index 08e65fc..643d3ac 100644
4687 --- a/arch/powerpc/platforms/83xx/suspend.c
4688 +++ b/arch/powerpc/platforms/83xx/suspend.c
4689 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
4690 return ret;
4691 }
4692
4693 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
4694 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
4695 .valid = mpc83xx_suspend_valid,
4696 .begin = mpc83xx_suspend_begin,
4697 .enter = mpc83xx_suspend_enter,
4698 diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
4699 index ca5bfdf..1602e09 100644
4700 --- a/arch/powerpc/platforms/cell/iommu.c
4701 +++ b/arch/powerpc/platforms/cell/iommu.c
4702 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
4703
4704 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
4705
4706 -struct dma_map_ops dma_iommu_fixed_ops = {
4707 +const struct dma_map_ops dma_iommu_fixed_ops = {
4708 .alloc_coherent = dma_fixed_alloc_coherent,
4709 .free_coherent = dma_fixed_free_coherent,
4710 .map_sg = dma_fixed_map_sg,
4711 diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
4712 index e34b305..20e48ec 100644
4713 --- a/arch/powerpc/platforms/ps3/system-bus.c
4714 +++ b/arch/powerpc/platforms/ps3/system-bus.c
4715 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
4716 return mask >= DMA_BIT_MASK(32);
4717 }
4718
4719 -static struct dma_map_ops ps3_sb_dma_ops = {
4720 +static const struct dma_map_ops ps3_sb_dma_ops = {
4721 .alloc_coherent = ps3_alloc_coherent,
4722 .free_coherent = ps3_free_coherent,
4723 .map_sg = ps3_sb_map_sg,
4724 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
4725 .unmap_page = ps3_unmap_page,
4726 };
4727
4728 -static struct dma_map_ops ps3_ioc0_dma_ops = {
4729 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
4730 .alloc_coherent = ps3_alloc_coherent,
4731 .free_coherent = ps3_free_coherent,
4732 .map_sg = ps3_ioc0_map_sg,
4733 diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
4734 index f0e6f28..60d53ed 100644
4735 --- a/arch/powerpc/platforms/pseries/Kconfig
4736 +++ b/arch/powerpc/platforms/pseries/Kconfig
4737 @@ -2,6 +2,8 @@ config PPC_PSERIES
4738 depends on PPC64 && PPC_BOOK3S
4739 bool "IBM pSeries & new (POWER5-based) iSeries"
4740 select MPIC
4741 + select PCI_MSI
4742 + select XICS
4743 select PPC_I8259
4744 select PPC_RTAS
4745 select RTAS_ERROR_LOGGING
4746 diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
4747 index aca7fff..76c2b6b 100644
4748 --- a/arch/s390/Kconfig
4749 +++ b/arch/s390/Kconfig
4750 @@ -197,28 +197,26 @@ config AUDIT_ARCH
4751
4752 config S390_SWITCH_AMODE
4753 bool "Switch kernel/user addressing modes"
4754 + default y
4755 help
4756 This option allows to switch the addressing modes of kernel and user
4757 - space. The kernel parameter switch_amode=on will enable this feature,
4758 - default is disabled. Enabling this (via kernel parameter) on machines
4759 - earlier than IBM System z9-109 EC/BC will reduce system performance.
4760 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
4761 + will reduce system performance.
4762
4763 Note that this option will also be selected by selecting the execute
4764 - protection option below. Enabling the execute protection via the
4765 - noexec kernel parameter will also switch the addressing modes,
4766 - independent of the switch_amode kernel parameter.
4767 + protection option below. Enabling the execute protection will also
4768 + switch the addressing modes, independent of this option.
4769
4770
4771 config S390_EXEC_PROTECT
4772 bool "Data execute protection"
4773 + default y
4774 select S390_SWITCH_AMODE
4775 help
4776 This option allows to enable a buffer overflow protection for user
4777 space programs and it also selects the addressing mode option above.
4778 - The kernel parameter noexec=on will enable this feature and also
4779 - switch the addressing modes, default is disabled. Enabling this (via
4780 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
4781 - will reduce system performance.
4782 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
4783 + reduce system performance.
4784
4785 comment "Code generation options"
4786
4787 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4788 index ae7c8f9..3f01a0c 100644
4789 --- a/arch/s390/include/asm/atomic.h
4790 +++ b/arch/s390/include/asm/atomic.h
4791 @@ -362,6 +362,16 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
4792 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4793 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4794
4795 +#define atomic64_read_unchecked(v) atomic64_read(v)
4796 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4797 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4798 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4799 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4800 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4801 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4802 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4803 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4804 +
4805 #define smp_mb__before_atomic_dec() smp_mb()
4806 #define smp_mb__after_atomic_dec() smp_mb()
4807 #define smp_mb__before_atomic_inc() smp_mb()
4808 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4809 index 9b86681..c5140db 100644
4810 --- a/arch/s390/include/asm/cache.h
4811 +++ b/arch/s390/include/asm/cache.h
4812 @@ -11,8 +11,10 @@
4813 #ifndef __ARCH_S390_CACHE_H
4814 #define __ARCH_S390_CACHE_H
4815
4816 -#define L1_CACHE_BYTES 256
4817 +#include <linux/const.h>
4818 +
4819 #define L1_CACHE_SHIFT 8
4820 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4821
4822 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
4823
4824 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4825 index e885442..e3a2817 100644
4826 --- a/arch/s390/include/asm/elf.h
4827 +++ b/arch/s390/include/asm/elf.h
4828 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
4829 that it will "exec", and that there is sufficient room for the brk. */
4830 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4831
4832 +#ifdef CONFIG_PAX_ASLR
4833 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4834 +
4835 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4836 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4837 +#endif
4838 +
4839 /* This yields a mask that user programs can use to figure out what
4840 instruction set this CPU supports. */
4841
4842 diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
4843 index e37478e..9ce0e9f 100644
4844 --- a/arch/s390/include/asm/setup.h
4845 +++ b/arch/s390/include/asm/setup.h
4846 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
4847 void detect_memory_layout(struct mem_chunk chunk[]);
4848
4849 #ifdef CONFIG_S390_SWITCH_AMODE
4850 -extern unsigned int switch_amode;
4851 +#define switch_amode (1)
4852 #else
4853 #define switch_amode (0)
4854 #endif
4855
4856 #ifdef CONFIG_S390_EXEC_PROTECT
4857 -extern unsigned int s390_noexec;
4858 +#define s390_noexec (1)
4859 #else
4860 #define s390_noexec (0)
4861 #endif
4862 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4863 index 8377e91..e28e6f1 100644
4864 --- a/arch/s390/include/asm/uaccess.h
4865 +++ b/arch/s390/include/asm/uaccess.h
4866 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
4867 copy_to_user(void __user *to, const void *from, unsigned long n)
4868 {
4869 might_fault();
4870 +
4871 + if ((long)n < 0)
4872 + return n;
4873 +
4874 if (access_ok(VERIFY_WRITE, to, n))
4875 n = __copy_to_user(to, from, n);
4876 return n;
4877 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4878 static inline unsigned long __must_check
4879 __copy_from_user(void *to, const void __user *from, unsigned long n)
4880 {
4881 + if ((long)n < 0)
4882 + return n;
4883 +
4884 if (__builtin_constant_p(n) && (n <= 256))
4885 return uaccess.copy_from_user_small(n, from, to);
4886 else
4887 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
4888 copy_from_user(void *to, const void __user *from, unsigned long n)
4889 {
4890 might_fault();
4891 +
4892 + if ((long)n < 0)
4893 + return n;
4894 +
4895 if (access_ok(VERIFY_READ, from, n))
4896 n = __copy_from_user(to, from, n);
4897 else
4898 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4899 index 639380a..72e3c02 100644
4900 --- a/arch/s390/kernel/module.c
4901 +++ b/arch/s390/kernel/module.c
4902 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4903
4904 /* Increase core size by size of got & plt and set start
4905 offsets for got and plt. */
4906 - me->core_size = ALIGN(me->core_size, 4);
4907 - me->arch.got_offset = me->core_size;
4908 - me->core_size += me->arch.got_size;
4909 - me->arch.plt_offset = me->core_size;
4910 - me->core_size += me->arch.plt_size;
4911 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4912 + me->arch.got_offset = me->core_size_rw;
4913 + me->core_size_rw += me->arch.got_size;
4914 + me->arch.plt_offset = me->core_size_rx;
4915 + me->core_size_rx += me->arch.plt_size;
4916 return 0;
4917 }
4918
4919 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4920 if (info->got_initialized == 0) {
4921 Elf_Addr *gotent;
4922
4923 - gotent = me->module_core + me->arch.got_offset +
4924 + gotent = me->module_core_rw + me->arch.got_offset +
4925 info->got_offset;
4926 *gotent = val;
4927 info->got_initialized = 1;
4928 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4929 else if (r_type == R_390_GOTENT ||
4930 r_type == R_390_GOTPLTENT)
4931 *(unsigned int *) loc =
4932 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4933 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4934 else if (r_type == R_390_GOT64 ||
4935 r_type == R_390_GOTPLT64)
4936 *(unsigned long *) loc = val;
4937 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4938 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4939 if (info->plt_initialized == 0) {
4940 unsigned int *ip;
4941 - ip = me->module_core + me->arch.plt_offset +
4942 + ip = me->module_core_rx + me->arch.plt_offset +
4943 info->plt_offset;
4944 #ifndef CONFIG_64BIT
4945 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4946 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4947 val - loc + 0xffffUL < 0x1ffffeUL) ||
4948 (r_type == R_390_PLT32DBL &&
4949 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4950 - val = (Elf_Addr) me->module_core +
4951 + val = (Elf_Addr) me->module_core_rx +
4952 me->arch.plt_offset +
4953 info->plt_offset;
4954 val += rela->r_addend - loc;
4955 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4956 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4957 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4958 val = val + rela->r_addend -
4959 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4960 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4961 if (r_type == R_390_GOTOFF16)
4962 *(unsigned short *) loc = val;
4963 else if (r_type == R_390_GOTOFF32)
4964 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4965 break;
4966 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4967 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4968 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4969 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4970 rela->r_addend - loc;
4971 if (r_type == R_390_GOTPC)
4972 *(unsigned int *) loc = val;
4973 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
4974 index 358e545..051e4f4 100644
4975 --- a/arch/s390/kernel/setup.c
4976 +++ b/arch/s390/kernel/setup.c
4977 @@ -307,9 +307,6 @@ static int __init early_parse_mem(char *p)
4978 early_param("mem", early_parse_mem);
4979
4980 #ifdef CONFIG_S390_SWITCH_AMODE
4981 -unsigned int switch_amode = 0;
4982 -EXPORT_SYMBOL_GPL(switch_amode);
4983 -
4984 static int set_amode_and_uaccess(unsigned long user_amode,
4985 unsigned long user32_amode)
4986 {
4987 @@ -335,17 +332,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4988 return 0;
4989 }
4990 }
4991 -
4992 -/*
4993 - * Switch kernel/user addressing modes?
4994 - */
4995 -static int __init early_parse_switch_amode(char *p)
4996 -{
4997 - switch_amode = 1;
4998 - return 0;
4999 -}
5000 -early_param("switch_amode", early_parse_switch_amode);
5001 -
5002 #else /* CONFIG_S390_SWITCH_AMODE */
5003 static inline int set_amode_and_uaccess(unsigned long user_amode,
5004 unsigned long user32_amode)
5005 @@ -354,24 +340,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
5006 }
5007 #endif /* CONFIG_S390_SWITCH_AMODE */
5008
5009 -#ifdef CONFIG_S390_EXEC_PROTECT
5010 -unsigned int s390_noexec = 0;
5011 -EXPORT_SYMBOL_GPL(s390_noexec);
5012 -
5013 -/*
5014 - * Enable execute protection?
5015 - */
5016 -static int __init early_parse_noexec(char *p)
5017 -{
5018 - if (!strncmp(p, "off", 3))
5019 - return 0;
5020 - switch_amode = 1;
5021 - s390_noexec = 1;
5022 - return 0;
5023 -}
5024 -early_param("noexec", early_parse_noexec);
5025 -#endif /* CONFIG_S390_EXEC_PROTECT */
5026 -
5027 static void setup_addressing_mode(void)
5028 {
5029 if (s390_noexec) {
5030 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
5031 index 0ab74ae..c8b68f9 100644
5032 --- a/arch/s390/mm/mmap.c
5033 +++ b/arch/s390/mm/mmap.c
5034 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5035 */
5036 if (mmap_is_legacy()) {
5037 mm->mmap_base = TASK_UNMAPPED_BASE;
5038 +
5039 +#ifdef CONFIG_PAX_RANDMMAP
5040 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5041 + mm->mmap_base += mm->delta_mmap;
5042 +#endif
5043 +
5044 mm->get_unmapped_area = arch_get_unmapped_area;
5045 mm->unmap_area = arch_unmap_area;
5046 } else {
5047 mm->mmap_base = mmap_base();
5048 +
5049 +#ifdef CONFIG_PAX_RANDMMAP
5050 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5051 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5052 +#endif
5053 +
5054 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5055 mm->unmap_area = arch_unmap_area_topdown;
5056 }
5057 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5058 */
5059 if (mmap_is_legacy()) {
5060 mm->mmap_base = TASK_UNMAPPED_BASE;
5061 +
5062 +#ifdef CONFIG_PAX_RANDMMAP
5063 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5064 + mm->mmap_base += mm->delta_mmap;
5065 +#endif
5066 +
5067 mm->get_unmapped_area = s390_get_unmapped_area;
5068 mm->unmap_area = arch_unmap_area;
5069 } else {
5070 mm->mmap_base = mmap_base();
5071 +
5072 +#ifdef CONFIG_PAX_RANDMMAP
5073 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5074 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5075 +#endif
5076 +
5077 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
5078 mm->unmap_area = arch_unmap_area_topdown;
5079 }
5080 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
5081 index ae3d59f..f65f075 100644
5082 --- a/arch/score/include/asm/cache.h
5083 +++ b/arch/score/include/asm/cache.h
5084 @@ -1,7 +1,9 @@
5085 #ifndef _ASM_SCORE_CACHE_H
5086 #define _ASM_SCORE_CACHE_H
5087
5088 +#include <linux/const.h>
5089 +
5090 #define L1_CACHE_SHIFT 4
5091 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5092 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5093
5094 #endif /* _ASM_SCORE_CACHE_H */
5095 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
5096 index 589d5c7..669e274 100644
5097 --- a/arch/score/include/asm/system.h
5098 +++ b/arch/score/include/asm/system.h
5099 @@ -17,7 +17,7 @@ do { \
5100 #define finish_arch_switch(prev) do {} while (0)
5101
5102 typedef void (*vi_handler_t)(void);
5103 -extern unsigned long arch_align_stack(unsigned long sp);
5104 +#define arch_align_stack(x) (x)
5105
5106 #define mb() barrier()
5107 #define rmb() barrier()
5108 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
5109 index 25d0803..d6c8e36 100644
5110 --- a/arch/score/kernel/process.c
5111 +++ b/arch/score/kernel/process.c
5112 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
5113
5114 return task_pt_regs(task)->cp0_epc;
5115 }
5116 -
5117 -unsigned long arch_align_stack(unsigned long sp)
5118 -{
5119 - return sp;
5120 -}
5121 diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
5122 index d936c1a..304a252 100644
5123 --- a/arch/sh/boards/mach-hp6xx/pm.c
5124 +++ b/arch/sh/boards/mach-hp6xx/pm.c
5125 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
5126 return 0;
5127 }
5128
5129 -static struct platform_suspend_ops hp6x0_pm_ops = {
5130 +static const struct platform_suspend_ops hp6x0_pm_ops = {
5131 .enter = hp6x0_pm_enter,
5132 .valid = suspend_valid_only_mem,
5133 };
5134 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
5135 index 02df18e..ae3a793 100644
5136 --- a/arch/sh/include/asm/cache.h
5137 +++ b/arch/sh/include/asm/cache.h
5138 @@ -9,10 +9,11 @@
5139 #define __ASM_SH_CACHE_H
5140 #ifdef __KERNEL__
5141
5142 +#include <linux/const.h>
5143 #include <linux/init.h>
5144 #include <cpu/cache.h>
5145
5146 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5147 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5148
5149 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
5150
5151 diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
5152 index 8a8a993..7b3079b 100644
5153 --- a/arch/sh/kernel/cpu/sh4/sq.c
5154 +++ b/arch/sh/kernel/cpu/sh4/sq.c
5155 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
5156 NULL,
5157 };
5158
5159 -static struct sysfs_ops sq_sysfs_ops = {
5160 +static const struct sysfs_ops sq_sysfs_ops = {
5161 .show = sq_sysfs_show,
5162 .store = sq_sysfs_store,
5163 };
5164 diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
5165 index ee3c2aa..c49cee6 100644
5166 --- a/arch/sh/kernel/cpu/shmobile/pm.c
5167 +++ b/arch/sh/kernel/cpu/shmobile/pm.c
5168 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
5169 return 0;
5170 }
5171
5172 -static struct platform_suspend_ops sh_pm_ops = {
5173 +static const struct platform_suspend_ops sh_pm_ops = {
5174 .enter = sh_pm_enter,
5175 .valid = suspend_valid_only_mem,
5176 };
5177 diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
5178 index 3e532d0..9faa306 100644
5179 --- a/arch/sh/kernel/kgdb.c
5180 +++ b/arch/sh/kernel/kgdb.c
5181 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
5182 {
5183 }
5184
5185 -struct kgdb_arch arch_kgdb_ops = {
5186 +const struct kgdb_arch arch_kgdb_ops = {
5187 /* Breakpoint instruction: trapa #0x3c */
5188 #ifdef CONFIG_CPU_LITTLE_ENDIAN
5189 .gdb_bpt_instr = { 0x3c, 0xc3 },
5190 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
5191 index afeb710..d1d1289 100644
5192 --- a/arch/sh/mm/mmap.c
5193 +++ b/arch/sh/mm/mmap.c
5194 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5195 addr = PAGE_ALIGN(addr);
5196
5197 vma = find_vma(mm, addr);
5198 - if (TASK_SIZE - len >= addr &&
5199 - (!vma || addr + len <= vma->vm_start))
5200 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5201 return addr;
5202 }
5203
5204 @@ -106,7 +105,7 @@ full_search:
5205 }
5206 return -ENOMEM;
5207 }
5208 - if (likely(!vma || addr + len <= vma->vm_start)) {
5209 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5210 /*
5211 * Remember the place where we stopped the search:
5212 */
5213 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5214 addr = PAGE_ALIGN(addr);
5215
5216 vma = find_vma(mm, addr);
5217 - if (TASK_SIZE - len >= addr &&
5218 - (!vma || addr + len <= vma->vm_start))
5219 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5220 return addr;
5221 }
5222
5223 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5224 /* make sure it can fit in the remaining address space */
5225 if (likely(addr > len)) {
5226 vma = find_vma(mm, addr-len);
5227 - if (!vma || addr <= vma->vm_start) {
5228 + if (check_heap_stack_gap(vma, addr - len, len)) {
5229 /* remember the address as a hint for next time */
5230 return (mm->free_area_cache = addr-len);
5231 }
5232 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5233 if (unlikely(mm->mmap_base < len))
5234 goto bottomup;
5235
5236 - addr = mm->mmap_base-len;
5237 - if (do_colour_align)
5238 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5239 + addr = mm->mmap_base - len;
5240
5241 do {
5242 + if (do_colour_align)
5243 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5244 /*
5245 * Lookup failure means no vma is above this address,
5246 * else if new region fits below vma->vm_start,
5247 * return with success:
5248 */
5249 vma = find_vma(mm, addr);
5250 - if (likely(!vma || addr+len <= vma->vm_start)) {
5251 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5252 /* remember the address as a hint for next time */
5253 return (mm->free_area_cache = addr);
5254 }
5255 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5256 mm->cached_hole_size = vma->vm_start - addr;
5257
5258 /* try just below the current vma->vm_start */
5259 - addr = vma->vm_start-len;
5260 - if (do_colour_align)
5261 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5262 - } while (likely(len < vma->vm_start));
5263 + addr = skip_heap_stack_gap(vma, len);
5264 + } while (!IS_ERR_VALUE(addr));
5265
5266 bottomup:
5267 /*
5268 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
5269 index 05ef538..dc9c857 100644
5270 --- a/arch/sparc/Kconfig
5271 +++ b/arch/sparc/Kconfig
5272 @@ -32,6 +32,7 @@ config SPARC
5273
5274 config SPARC32
5275 def_bool !64BIT
5276 + select GENERIC_ATOMIC64
5277
5278 config SPARC64
5279 def_bool 64BIT
5280 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
5281 index 113225b..7fd04e7 100644
5282 --- a/arch/sparc/Makefile
5283 +++ b/arch/sparc/Makefile
5284 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5285 # Export what is needed by arch/sparc/boot/Makefile
5286 export VMLINUX_INIT VMLINUX_MAIN
5287 VMLINUX_INIT := $(head-y) $(init-y)
5288 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5289 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5290 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5291 VMLINUX_MAIN += $(drivers-y) $(net-y)
5292
5293 diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
5294 index f0d343c..cf36e68 100644
5295 --- a/arch/sparc/include/asm/atomic_32.h
5296 +++ b/arch/sparc/include/asm/atomic_32.h
5297 @@ -13,6 +13,8 @@
5298
5299 #include <linux/types.h>
5300
5301 +#include <asm-generic/atomic64.h>
5302 +
5303 #ifdef __KERNEL__
5304
5305 #include <asm/system.h>
5306 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5307 index f5cc06f..f858d47 100644
5308 --- a/arch/sparc/include/asm/atomic_64.h
5309 +++ b/arch/sparc/include/asm/atomic_64.h
5310 @@ -14,18 +14,40 @@
5311 #define ATOMIC64_INIT(i) { (i) }
5312
5313 #define atomic_read(v) ((v)->counter)
5314 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5315 +{
5316 + return v->counter;
5317 +}
5318 #define atomic64_read(v) ((v)->counter)
5319 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5320 +{
5321 + return v->counter;
5322 +}
5323
5324 #define atomic_set(v, i) (((v)->counter) = i)
5325 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5326 +{
5327 + v->counter = i;
5328 +}
5329 #define atomic64_set(v, i) (((v)->counter) = i)
5330 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5331 +{
5332 + v->counter = i;
5333 +}
5334
5335 extern void atomic_add(int, atomic_t *);
5336 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5337 extern void atomic64_add(long, atomic64_t *);
5338 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5339 extern void atomic_sub(int, atomic_t *);
5340 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5341 extern void atomic64_sub(long, atomic64_t *);
5342 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5343
5344 extern int atomic_add_ret(int, atomic_t *);
5345 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5346 extern long atomic64_add_ret(long, atomic64_t *);
5347 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5348 extern int atomic_sub_ret(int, atomic_t *);
5349 extern long atomic64_sub_ret(long, atomic64_t *);
5350
5351 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5352 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5353
5354 #define atomic_inc_return(v) atomic_add_ret(1, v)
5355 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5356 +{
5357 + return atomic_add_ret_unchecked(1, v);
5358 +}
5359 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5360 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5361 +{
5362 + return atomic64_add_ret_unchecked(1, v);
5363 +}
5364
5365 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5366 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5367
5368 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5369 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5370 +{
5371 + return atomic_add_ret_unchecked(i, v);
5372 +}
5373 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5374 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5375 +{
5376 + return atomic64_add_ret_unchecked(i, v);
5377 +}
5378
5379 /*
5380 * atomic_inc_and_test - increment and test
5381 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5382 * other cases.
5383 */
5384 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5385 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5386 +{
5387 + return atomic_inc_return_unchecked(v) == 0;
5388 +}
5389 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5390
5391 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5392 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5393 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5394
5395 #define atomic_inc(v) atomic_add(1, v)
5396 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5397 +{
5398 + atomic_add_unchecked(1, v);
5399 +}
5400 #define atomic64_inc(v) atomic64_add(1, v)
5401 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5402 +{
5403 + atomic64_add_unchecked(1, v);
5404 +}
5405
5406 #define atomic_dec(v) atomic_sub(1, v)
5407 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5408 +{
5409 + atomic_sub_unchecked(1, v);
5410 +}
5411 #define atomic64_dec(v) atomic64_sub(1, v)
5412 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5413 +{
5414 + atomic64_sub_unchecked(1, v);
5415 +}
5416
5417 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5418 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5419
5420 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5421 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5422 +{
5423 + return cmpxchg(&v->counter, old, new);
5424 +}
5425 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5426 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5427 +{
5428 + return xchg(&v->counter, new);
5429 +}
5430
5431 static inline int atomic_add_unless(atomic_t *v, int a, int u)
5432 {
5433 - int c, old;
5434 + int c, old, new;
5435 c = atomic_read(v);
5436 for (;;) {
5437 - if (unlikely(c == (u)))
5438 + if (unlikely(c == u))
5439 break;
5440 - old = atomic_cmpxchg((v), c, c + (a));
5441 +
5442 + asm volatile("addcc %2, %0, %0\n"
5443 +
5444 +#ifdef CONFIG_PAX_REFCOUNT
5445 + "tvs %%icc, 6\n"
5446 +#endif
5447 +
5448 + : "=r" (new)
5449 + : "0" (c), "ir" (a)
5450 + : "cc");
5451 +
5452 + old = atomic_cmpxchg(v, c, new);
5453 if (likely(old == c))
5454 break;
5455 c = old;
5456 }
5457 - return c != (u);
5458 + return c != u;
5459 }
5460
5461 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
5462 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
5463 #define atomic64_cmpxchg(v, o, n) \
5464 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5465 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5466 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5467 +{
5468 + return xchg(&v->counter, new);
5469 +}
5470
5471 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5472 {
5473 - long c, old;
5474 + long c, old, new;
5475 c = atomic64_read(v);
5476 for (;;) {
5477 - if (unlikely(c == (u)))
5478 + if (unlikely(c == u))
5479 break;
5480 - old = atomic64_cmpxchg((v), c, c + (a));
5481 +
5482 + asm volatile("addcc %2, %0, %0\n"
5483 +
5484 +#ifdef CONFIG_PAX_REFCOUNT
5485 + "tvs %%xcc, 6\n"
5486 +#endif
5487 +
5488 + : "=r" (new)
5489 + : "0" (c), "ir" (a)
5490 + : "cc");
5491 +
5492 + old = atomic64_cmpxchg(v, c, new);
5493 if (likely(old == c))
5494 break;
5495 c = old;
5496 }
5497 - return c != (u);
5498 + return c != u;
5499 }
5500
5501 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5502 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5503 index 41f85ae..73b80b5 100644
5504 --- a/arch/sparc/include/asm/cache.h
5505 +++ b/arch/sparc/include/asm/cache.h
5506 @@ -7,8 +7,10 @@
5507 #ifndef _SPARC_CACHE_H
5508 #define _SPARC_CACHE_H
5509
5510 +#include <linux/const.h>
5511 +
5512 #define L1_CACHE_SHIFT 5
5513 -#define L1_CACHE_BYTES 32
5514 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5515 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
5516
5517 #ifdef CONFIG_SPARC32
5518 diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
5519 index 5a8c308..38def92 100644
5520 --- a/arch/sparc/include/asm/dma-mapping.h
5521 +++ b/arch/sparc/include/asm/dma-mapping.h
5522 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
5523 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
5524 #define dma_is_consistent(d, h) (1)
5525
5526 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
5527 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
5528 extern struct bus_type pci_bus_type;
5529
5530 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
5531 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
5532 {
5533 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
5534 if (dev->bus == &pci_bus_type)
5535 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
5536 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
5537 dma_addr_t *dma_handle, gfp_t flag)
5538 {
5539 - struct dma_map_ops *ops = get_dma_ops(dev);
5540 + const struct dma_map_ops *ops = get_dma_ops(dev);
5541 void *cpu_addr;
5542
5543 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
5544 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
5545 static inline void dma_free_coherent(struct device *dev, size_t size,
5546 void *cpu_addr, dma_addr_t dma_handle)
5547 {
5548 - struct dma_map_ops *ops = get_dma_ops(dev);
5549 + const struct dma_map_ops *ops = get_dma_ops(dev);
5550
5551 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
5552 ops->free_coherent(dev, size, cpu_addr, dma_handle);
5553 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5554 index 381a1b5..b97e3ff 100644
5555 --- a/arch/sparc/include/asm/elf_32.h
5556 +++ b/arch/sparc/include/asm/elf_32.h
5557 @@ -116,6 +116,13 @@ typedef struct {
5558
5559 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5560
5561 +#ifdef CONFIG_PAX_ASLR
5562 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
5563 +
5564 +#define PAX_DELTA_MMAP_LEN 16
5565 +#define PAX_DELTA_STACK_LEN 16
5566 +#endif
5567 +
5568 /* This yields a mask that user programs can use to figure out what
5569 instruction set this cpu supports. This can NOT be done in userspace
5570 on Sparc. */
5571 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5572 index 9968085..c2106ef 100644
5573 --- a/arch/sparc/include/asm/elf_64.h
5574 +++ b/arch/sparc/include/asm/elf_64.h
5575 @@ -163,6 +163,12 @@ typedef struct {
5576 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5577 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5578
5579 +#ifdef CONFIG_PAX_ASLR
5580 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5581 +
5582 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5583 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5584 +#endif
5585
5586 /* This yields a mask that user programs can use to figure out what
5587 instruction set this cpu supports. */
5588 diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
5589 index 156707b..aefa786 100644
5590 --- a/arch/sparc/include/asm/page_32.h
5591 +++ b/arch/sparc/include/asm/page_32.h
5592 @@ -8,6 +8,8 @@
5593 #ifndef _SPARC_PAGE_H
5594 #define _SPARC_PAGE_H
5595
5596 +#include <linux/const.h>
5597 +
5598 #define PAGE_SHIFT 12
5599
5600 #ifndef __ASSEMBLY__
5601 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5602 index e0cabe7..efd60f1 100644
5603 --- a/arch/sparc/include/asm/pgtable_32.h
5604 +++ b/arch/sparc/include/asm/pgtable_32.h
5605 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5606 BTFIXUPDEF_INT(page_none)
5607 BTFIXUPDEF_INT(page_copy)
5608 BTFIXUPDEF_INT(page_readonly)
5609 +
5610 +#ifdef CONFIG_PAX_PAGEEXEC
5611 +BTFIXUPDEF_INT(page_shared_noexec)
5612 +BTFIXUPDEF_INT(page_copy_noexec)
5613 +BTFIXUPDEF_INT(page_readonly_noexec)
5614 +#endif
5615 +
5616 BTFIXUPDEF_INT(page_kernel)
5617
5618 #define PMD_SHIFT SUN4C_PMD_SHIFT
5619 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
5620 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5621 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5622
5623 +#ifdef CONFIG_PAX_PAGEEXEC
5624 +extern pgprot_t PAGE_SHARED_NOEXEC;
5625 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5626 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5627 +#else
5628 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
5629 +# define PAGE_COPY_NOEXEC PAGE_COPY
5630 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
5631 +#endif
5632 +
5633 extern unsigned long page_kernel;
5634
5635 #ifdef MODULE
5636 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5637 index 1407c07..7e10231 100644
5638 --- a/arch/sparc/include/asm/pgtsrmmu.h
5639 +++ b/arch/sparc/include/asm/pgtsrmmu.h
5640 @@ -115,6 +115,13 @@
5641 SRMMU_EXEC | SRMMU_REF)
5642 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5643 SRMMU_EXEC | SRMMU_REF)
5644 +
5645 +#ifdef CONFIG_PAX_PAGEEXEC
5646 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5647 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5648 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5649 +#endif
5650 +
5651 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5652 SRMMU_DIRTY | SRMMU_REF)
5653
5654 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5655 index 43e5147..47622a1 100644
5656 --- a/arch/sparc/include/asm/spinlock_64.h
5657 +++ b/arch/sparc/include/asm/spinlock_64.h
5658 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
5659
5660 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5661
5662 -static void inline arch_read_lock(raw_rwlock_t *lock)
5663 +static inline void arch_read_lock(raw_rwlock_t *lock)
5664 {
5665 unsigned long tmp1, tmp2;
5666
5667 __asm__ __volatile__ (
5668 "1: ldsw [%2], %0\n"
5669 " brlz,pn %0, 2f\n"
5670 -"4: add %0, 1, %1\n"
5671 +"4: addcc %0, 1, %1\n"
5672 +
5673 +#ifdef CONFIG_PAX_REFCOUNT
5674 +" tvs %%icc, 6\n"
5675 +#endif
5676 +
5677 " cas [%2], %0, %1\n"
5678 " cmp %0, %1\n"
5679 " bne,pn %%icc, 1b\n"
5680 @@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
5681 " .previous"
5682 : "=&r" (tmp1), "=&r" (tmp2)
5683 : "r" (lock)
5684 - : "memory");
5685 + : "memory", "cc");
5686 }
5687
5688 -static int inline arch_read_trylock(raw_rwlock_t *lock)
5689 +static inline int arch_read_trylock(raw_rwlock_t *lock)
5690 {
5691 int tmp1, tmp2;
5692
5693 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
5694 "1: ldsw [%2], %0\n"
5695 " brlz,a,pn %0, 2f\n"
5696 " mov 0, %0\n"
5697 -" add %0, 1, %1\n"
5698 +" addcc %0, 1, %1\n"
5699 +
5700 +#ifdef CONFIG_PAX_REFCOUNT
5701 +" tvs %%icc, 6\n"
5702 +#endif
5703 +
5704 " cas [%2], %0, %1\n"
5705 " cmp %0, %1\n"
5706 " bne,pn %%icc, 1b\n"
5707 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
5708 return tmp1;
5709 }
5710
5711 -static void inline arch_read_unlock(raw_rwlock_t *lock)
5712 +static inline void arch_read_unlock(raw_rwlock_t *lock)
5713 {
5714 unsigned long tmp1, tmp2;
5715
5716 __asm__ __volatile__(
5717 "1: lduw [%2], %0\n"
5718 -" sub %0, 1, %1\n"
5719 +" subcc %0, 1, %1\n"
5720 +
5721 +#ifdef CONFIG_PAX_REFCOUNT
5722 +" tvs %%icc, 6\n"
5723 +#endif
5724 +
5725 " cas [%2], %0, %1\n"
5726 " cmp %0, %1\n"
5727 " bne,pn %%xcc, 1b\n"
5728 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
5729 : "memory");
5730 }
5731
5732 -static void inline arch_write_lock(raw_rwlock_t *lock)
5733 +static inline void arch_write_lock(raw_rwlock_t *lock)
5734 {
5735 unsigned long mask, tmp1, tmp2;
5736
5737 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
5738 : "memory");
5739 }
5740
5741 -static void inline arch_write_unlock(raw_rwlock_t *lock)
5742 +static inline void arch_write_unlock(raw_rwlock_t *lock)
5743 {
5744 __asm__ __volatile__(
5745 " stw %%g0, [%0]"
5746 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
5747 : "memory");
5748 }
5749
5750 -static int inline arch_write_trylock(raw_rwlock_t *lock)
5751 +static inline int arch_write_trylock(raw_rwlock_t *lock)
5752 {
5753 unsigned long mask, tmp1, tmp2, result;
5754
5755 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5756 index 844d73a..f787fb9 100644
5757 --- a/arch/sparc/include/asm/thread_info_32.h
5758 +++ b/arch/sparc/include/asm/thread_info_32.h
5759 @@ -50,6 +50,8 @@ struct thread_info {
5760 unsigned long w_saved;
5761
5762 struct restart_block restart_block;
5763 +
5764 + unsigned long lowest_stack;
5765 };
5766
5767 /*
5768 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5769 index f78ad9a..9f55fc7 100644
5770 --- a/arch/sparc/include/asm/thread_info_64.h
5771 +++ b/arch/sparc/include/asm/thread_info_64.h
5772 @@ -68,6 +68,8 @@ struct thread_info {
5773 struct pt_regs *kern_una_regs;
5774 unsigned int kern_una_insn;
5775
5776 + unsigned long lowest_stack;
5777 +
5778 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5779 };
5780
5781 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5782 index e88fbe5..96b0ce5 100644
5783 --- a/arch/sparc/include/asm/uaccess.h
5784 +++ b/arch/sparc/include/asm/uaccess.h
5785 @@ -1,5 +1,13 @@
5786 #ifndef ___ASM_SPARC_UACCESS_H
5787 #define ___ASM_SPARC_UACCESS_H
5788 +
5789 +#ifdef __KERNEL__
5790 +#ifndef __ASSEMBLY__
5791 +#include <linux/types.h>
5792 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
5793 +#endif
5794 +#endif
5795 +
5796 #if defined(__sparc__) && defined(__arch64__)
5797 #include <asm/uaccess_64.h>
5798 #else
5799 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5800 index 8303ac4..07f333d 100644
5801 --- a/arch/sparc/include/asm/uaccess_32.h
5802 +++ b/arch/sparc/include/asm/uaccess_32.h
5803 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5804
5805 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5806 {
5807 - if (n && __access_ok((unsigned long) to, n))
5808 + if ((long)n < 0)
5809 + return n;
5810 +
5811 + if (n && __access_ok((unsigned long) to, n)) {
5812 + if (!__builtin_constant_p(n))
5813 + check_object_size(from, n, true);
5814 return __copy_user(to, (__force void __user *) from, n);
5815 - else
5816 + } else
5817 return n;
5818 }
5819
5820 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5821 {
5822 + if ((long)n < 0)
5823 + return n;
5824 +
5825 + if (!__builtin_constant_p(n))
5826 + check_object_size(from, n, true);
5827 +
5828 return __copy_user(to, (__force void __user *) from, n);
5829 }
5830
5831 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5832 {
5833 - if (n && __access_ok((unsigned long) from, n))
5834 + if ((long)n < 0)
5835 + return n;
5836 +
5837 + if (n && __access_ok((unsigned long) from, n)) {
5838 + if (!__builtin_constant_p(n))
5839 + check_object_size(to, n, false);
5840 return __copy_user((__force void __user *) to, from, n);
5841 - else
5842 + } else
5843 return n;
5844 }
5845
5846 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5847 {
5848 + if ((long)n < 0)
5849 + return n;
5850 +
5851 return __copy_user((__force void __user *) to, from, n);
5852 }
5853
5854 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5855 index 9ea271e..7b8a271 100644
5856 --- a/arch/sparc/include/asm/uaccess_64.h
5857 +++ b/arch/sparc/include/asm/uaccess_64.h
5858 @@ -9,6 +9,7 @@
5859 #include <linux/compiler.h>
5860 #include <linux/string.h>
5861 #include <linux/thread_info.h>
5862 +#include <linux/kernel.h>
5863 #include <asm/asi.h>
5864 #include <asm/system.h>
5865 #include <asm/spitfire.h>
5866 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5867 static inline unsigned long __must_check
5868 copy_from_user(void *to, const void __user *from, unsigned long size)
5869 {
5870 - unsigned long ret = ___copy_from_user(to, from, size);
5871 + unsigned long ret;
5872
5873 + if ((long)size < 0 || size > INT_MAX)
5874 + return size;
5875 +
5876 + if (!__builtin_constant_p(size))
5877 + check_object_size(to, size, false);
5878 +
5879 + ret = ___copy_from_user(to, from, size);
5880 if (unlikely(ret))
5881 ret = copy_from_user_fixup(to, from, size);
5882 return ret;
5883 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5884 static inline unsigned long __must_check
5885 copy_to_user(void __user *to, const void *from, unsigned long size)
5886 {
5887 - unsigned long ret = ___copy_to_user(to, from, size);
5888 + unsigned long ret;
5889
5890 + if ((long)size < 0 || size > INT_MAX)
5891 + return size;
5892 +
5893 + if (!__builtin_constant_p(size))
5894 + check_object_size(from, size, true);
5895 +
5896 + ret = ___copy_to_user(to, from, size);
5897 if (unlikely(ret))
5898 ret = copy_to_user_fixup(to, from, size);
5899 return ret;
5900 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5901 index 2782681..77ded84 100644
5902 --- a/arch/sparc/kernel/Makefile
5903 +++ b/arch/sparc/kernel/Makefile
5904 @@ -3,7 +3,7 @@
5905 #
5906
5907 asflags-y := -ansi
5908 -ccflags-y := -Werror
5909 +#ccflags-y := -Werror
5910
5911 extra-y := head_$(BITS).o
5912 extra-y += init_task.o
5913 diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
5914 index 7690cc2..ece64c9 100644
5915 --- a/arch/sparc/kernel/iommu.c
5916 +++ b/arch/sparc/kernel/iommu.c
5917 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
5918 spin_unlock_irqrestore(&iommu->lock, flags);
5919 }
5920
5921 -static struct dma_map_ops sun4u_dma_ops = {
5922 +static const struct dma_map_ops sun4u_dma_ops = {
5923 .alloc_coherent = dma_4u_alloc_coherent,
5924 .free_coherent = dma_4u_free_coherent,
5925 .map_page = dma_4u_map_page,
5926 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
5927 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
5928 };
5929
5930 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5931 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5932 EXPORT_SYMBOL(dma_ops);
5933
5934 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
5935 diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
5936 index 9f61fd8..bd048db 100644
5937 --- a/arch/sparc/kernel/ioport.c
5938 +++ b/arch/sparc/kernel/ioport.c
5939 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
5940 BUG();
5941 }
5942
5943 -struct dma_map_ops sbus_dma_ops = {
5944 +const struct dma_map_ops sbus_dma_ops = {
5945 .alloc_coherent = sbus_alloc_coherent,
5946 .free_coherent = sbus_free_coherent,
5947 .map_page = sbus_map_page,
5948 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
5949 .sync_sg_for_device = sbus_sync_sg_for_device,
5950 };
5951
5952 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
5953 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
5954 EXPORT_SYMBOL(dma_ops);
5955
5956 static int __init sparc_register_ioport(void)
5957 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
5958 }
5959 }
5960
5961 -struct dma_map_ops pci32_dma_ops = {
5962 +const struct dma_map_ops pci32_dma_ops = {
5963 .alloc_coherent = pci32_alloc_coherent,
5964 .free_coherent = pci32_free_coherent,
5965 .map_page = pci32_map_page,
5966 diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
5967 index 04df4ed..55c4b6e 100644
5968 --- a/arch/sparc/kernel/kgdb_32.c
5969 +++ b/arch/sparc/kernel/kgdb_32.c
5970 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
5971 {
5972 }
5973
5974 -struct kgdb_arch arch_kgdb_ops = {
5975 +const struct kgdb_arch arch_kgdb_ops = {
5976 /* Breakpoint instruction: ta 0x7d */
5977 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
5978 };
5979 diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
5980 index f5a0fd4..d886f71 100644
5981 --- a/arch/sparc/kernel/kgdb_64.c
5982 +++ b/arch/sparc/kernel/kgdb_64.c
5983 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
5984 {
5985 }
5986
5987 -struct kgdb_arch arch_kgdb_ops = {
5988 +const struct kgdb_arch arch_kgdb_ops = {
5989 /* Breakpoint instruction: ta 0x72 */
5990 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
5991 };
5992 diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
5993 index 23c33ff..d137fbd 100644
5994 --- a/arch/sparc/kernel/pci_sun4v.c
5995 +++ b/arch/sparc/kernel/pci_sun4v.c
5996 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
5997 spin_unlock_irqrestore(&iommu->lock, flags);
5998 }
5999
6000 -static struct dma_map_ops sun4v_dma_ops = {
6001 +static const struct dma_map_ops sun4v_dma_ops = {
6002 .alloc_coherent = dma_4v_alloc_coherent,
6003 .free_coherent = dma_4v_free_coherent,
6004 .map_page = dma_4v_map_page,
6005 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
6006 index c49865b..b41a81b 100644
6007 --- a/arch/sparc/kernel/process_32.c
6008 +++ b/arch/sparc/kernel/process_32.c
6009 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
6010 rw->ins[4], rw->ins[5],
6011 rw->ins[6],
6012 rw->ins[7]);
6013 - printk("%pS\n", (void *) rw->ins[7]);
6014 + printk("%pA\n", (void *) rw->ins[7]);
6015 rw = (struct reg_window32 *) rw->ins[6];
6016 }
6017 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
6018 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
6019
6020 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
6021 r->psr, r->pc, r->npc, r->y, print_tainted());
6022 - printk("PC: <%pS>\n", (void *) r->pc);
6023 + printk("PC: <%pA>\n", (void *) r->pc);
6024 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6025 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
6026 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
6027 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6028 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
6029 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
6030 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
6031 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
6032
6033 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6034 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
6035 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6036 rw = (struct reg_window32 *) fp;
6037 pc = rw->ins[7];
6038 printk("[%08lx : ", pc);
6039 - printk("%pS ] ", (void *) pc);
6040 + printk("%pA ] ", (void *) pc);
6041 fp = rw->ins[6];
6042 } while (++count < 16);
6043 printk("\n");
6044 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
6045 index cb70476..3d0c191 100644
6046 --- a/arch/sparc/kernel/process_64.c
6047 +++ b/arch/sparc/kernel/process_64.c
6048 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
6049 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
6050 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
6051 if (regs->tstate & TSTATE_PRIV)
6052 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
6053 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
6054 }
6055
6056 void show_regs(struct pt_regs *regs)
6057 {
6058 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
6059 regs->tpc, regs->tnpc, regs->y, print_tainted());
6060 - printk("TPC: <%pS>\n", (void *) regs->tpc);
6061 + printk("TPC: <%pA>\n", (void *) regs->tpc);
6062 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
6063 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
6064 regs->u_regs[3]);
6065 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
6066 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
6067 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
6068 regs->u_regs[15]);
6069 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
6070 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
6071 show_regwindow(regs);
6072 }
6073
6074 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
6075 ((tp && tp->task) ? tp->task->pid : -1));
6076
6077 if (gp->tstate & TSTATE_PRIV) {
6078 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
6079 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
6080 (void *) gp->tpc,
6081 (void *) gp->o7,
6082 (void *) gp->i7,
6083 diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
6084 index 6edc4e5..06a69b4 100644
6085 --- a/arch/sparc/kernel/sigutil_64.c
6086 +++ b/arch/sparc/kernel/sigutil_64.c
6087 @@ -2,6 +2,7 @@
6088 #include <linux/types.h>
6089 #include <linux/thread_info.h>
6090 #include <linux/uaccess.h>
6091 +#include <linux/errno.h>
6092
6093 #include <asm/sigcontext.h>
6094 #include <asm/fpumacro.h>
6095 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
6096 index 3a82e65..ce0a53a 100644
6097 --- a/arch/sparc/kernel/sys_sparc_32.c
6098 +++ b/arch/sparc/kernel/sys_sparc_32.c
6099 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6100 if (ARCH_SUN4C && len > 0x20000000)
6101 return -ENOMEM;
6102 if (!addr)
6103 - addr = TASK_UNMAPPED_BASE;
6104 + addr = current->mm->mmap_base;
6105
6106 if (flags & MAP_SHARED)
6107 addr = COLOUR_ALIGN(addr);
6108 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6109 }
6110 if (TASK_SIZE - PAGE_SIZE - len < addr)
6111 return -ENOMEM;
6112 - if (!vmm || addr + len <= vmm->vm_start)
6113 + if (check_heap_stack_gap(vmm, addr, len))
6114 return addr;
6115 addr = vmm->vm_end;
6116 if (flags & MAP_SHARED)
6117 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
6118 index cfa0e19..98972ac 100644
6119 --- a/arch/sparc/kernel/sys_sparc_64.c
6120 +++ b/arch/sparc/kernel/sys_sparc_64.c
6121 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6122 /* We do not accept a shared mapping if it would violate
6123 * cache aliasing constraints.
6124 */
6125 - if ((flags & MAP_SHARED) &&
6126 + if ((filp || (flags & MAP_SHARED)) &&
6127 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6128 return -EINVAL;
6129 return addr;
6130 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6131 if (filp || (flags & MAP_SHARED))
6132 do_color_align = 1;
6133
6134 +#ifdef CONFIG_PAX_RANDMMAP
6135 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6136 +#endif
6137 +
6138 if (addr) {
6139 if (do_color_align)
6140 addr = COLOUR_ALIGN(addr, pgoff);
6141 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6142 addr = PAGE_ALIGN(addr);
6143
6144 vma = find_vma(mm, addr);
6145 - if (task_size - len >= addr &&
6146 - (!vma || addr + len <= vma->vm_start))
6147 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6148 return addr;
6149 }
6150
6151 if (len > mm->cached_hole_size) {
6152 - start_addr = addr = mm->free_area_cache;
6153 + start_addr = addr = mm->free_area_cache;
6154 } else {
6155 - start_addr = addr = TASK_UNMAPPED_BASE;
6156 + start_addr = addr = mm->mmap_base;
6157 mm->cached_hole_size = 0;
6158 }
6159
6160 @@ -175,14 +178,14 @@ full_search:
6161 vma = find_vma(mm, VA_EXCLUDE_END);
6162 }
6163 if (unlikely(task_size < addr)) {
6164 - if (start_addr != TASK_UNMAPPED_BASE) {
6165 - start_addr = addr = TASK_UNMAPPED_BASE;
6166 + if (start_addr != mm->mmap_base) {
6167 + start_addr = addr = mm->mmap_base;
6168 mm->cached_hole_size = 0;
6169 goto full_search;
6170 }
6171 return -ENOMEM;
6172 }
6173 - if (likely(!vma || addr + len <= vma->vm_start)) {
6174 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6175 /*
6176 * Remember the place where we stopped the search:
6177 */
6178 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6179 /* We do not accept a shared mapping if it would violate
6180 * cache aliasing constraints.
6181 */
6182 - if ((flags & MAP_SHARED) &&
6183 + if ((filp || (flags & MAP_SHARED)) &&
6184 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6185 return -EINVAL;
6186 return addr;
6187 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6188 addr = PAGE_ALIGN(addr);
6189
6190 vma = find_vma(mm, addr);
6191 - if (task_size - len >= addr &&
6192 - (!vma || addr + len <= vma->vm_start))
6193 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6194 return addr;
6195 }
6196
6197 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6198 /* make sure it can fit in the remaining address space */
6199 if (likely(addr > len)) {
6200 vma = find_vma(mm, addr-len);
6201 - if (!vma || addr <= vma->vm_start) {
6202 + if (check_heap_stack_gap(vma, addr - len, len)) {
6203 /* remember the address as a hint for next time */
6204 return (mm->free_area_cache = addr-len);
6205 }
6206 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6207 if (unlikely(mm->mmap_base < len))
6208 goto bottomup;
6209
6210 - addr = mm->mmap_base-len;
6211 - if (do_color_align)
6212 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6213 + addr = mm->mmap_base - len;
6214
6215 do {
6216 + if (do_color_align)
6217 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6218 /*
6219 * Lookup failure means no vma is above this address,
6220 * else if new region fits below vma->vm_start,
6221 * return with success:
6222 */
6223 vma = find_vma(mm, addr);
6224 - if (likely(!vma || addr+len <= vma->vm_start)) {
6225 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6226 /* remember the address as a hint for next time */
6227 return (mm->free_area_cache = addr);
6228 }
6229 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6230 mm->cached_hole_size = vma->vm_start - addr;
6231
6232 /* try just below the current vma->vm_start */
6233 - addr = vma->vm_start-len;
6234 - if (do_color_align)
6235 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6236 - } while (likely(len < vma->vm_start));
6237 + addr = skip_heap_stack_gap(vma, len);
6238 + } while (!IS_ERR_VALUE(addr));
6239
6240 bottomup:
6241 /*
6242 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6243 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
6244 sysctl_legacy_va_layout) {
6245 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6246 +
6247 +#ifdef CONFIG_PAX_RANDMMAP
6248 + if (mm->pax_flags & MF_PAX_RANDMMAP)
6249 + mm->mmap_base += mm->delta_mmap;
6250 +#endif
6251 +
6252 mm->get_unmapped_area = arch_get_unmapped_area;
6253 mm->unmap_area = arch_unmap_area;
6254 } else {
6255 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6256 gap = (task_size / 6 * 5);
6257
6258 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
6259 +
6260 +#ifdef CONFIG_PAX_RANDMMAP
6261 + if (mm->pax_flags & MF_PAX_RANDMMAP)
6262 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6263 +#endif
6264 +
6265 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6266 mm->unmap_area = arch_unmap_area_topdown;
6267 }
6268 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6269 index c0490c7..84959d1 100644
6270 --- a/arch/sparc/kernel/traps_32.c
6271 +++ b/arch/sparc/kernel/traps_32.c
6272 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
6273 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6274 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6275
6276 +extern void gr_handle_kernel_exploit(void);
6277 +
6278 void die_if_kernel(char *str, struct pt_regs *regs)
6279 {
6280 static int die_counter;
6281 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6282 count++ < 30 &&
6283 (((unsigned long) rw) >= PAGE_OFFSET) &&
6284 !(((unsigned long) rw) & 0x7)) {
6285 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
6286 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
6287 (void *) rw->ins[7]);
6288 rw = (struct reg_window32 *)rw->ins[6];
6289 }
6290 }
6291 printk("Instruction DUMP:");
6292 instruction_dump ((unsigned long *) regs->pc);
6293 - if(regs->psr & PSR_PS)
6294 + if(regs->psr & PSR_PS) {
6295 + gr_handle_kernel_exploit();
6296 do_exit(SIGKILL);
6297 + }
6298 do_exit(SIGSEGV);
6299 }
6300
6301 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6302 index 10f7bb9..cdb6793 100644
6303 --- a/arch/sparc/kernel/traps_64.c
6304 +++ b/arch/sparc/kernel/traps_64.c
6305 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6306 i + 1,
6307 p->trapstack[i].tstate, p->trapstack[i].tpc,
6308 p->trapstack[i].tnpc, p->trapstack[i].tt);
6309 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6310 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6311 }
6312 }
6313
6314 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6315
6316 lvl -= 0x100;
6317 if (regs->tstate & TSTATE_PRIV) {
6318 +
6319 +#ifdef CONFIG_PAX_REFCOUNT
6320 + if (lvl == 6)
6321 + pax_report_refcount_overflow(regs);
6322 +#endif
6323 +
6324 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6325 die_if_kernel(buffer, regs);
6326 }
6327 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6328 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6329 {
6330 char buffer[32];
6331 -
6332 +
6333 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6334 0, lvl, SIGTRAP) == NOTIFY_STOP)
6335 return;
6336
6337 +#ifdef CONFIG_PAX_REFCOUNT
6338 + if (lvl == 6)
6339 + pax_report_refcount_overflow(regs);
6340 +#endif
6341 +
6342 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6343
6344 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6345 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6346 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6347 printk("%s" "ERROR(%d): ",
6348 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6349 - printk("TPC<%pS>\n", (void *) regs->tpc);
6350 + printk("TPC<%pA>\n", (void *) regs->tpc);
6351 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6352 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6353 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6354 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6355 smp_processor_id(),
6356 (type & 0x1) ? 'I' : 'D',
6357 regs->tpc);
6358 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6359 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6360 panic("Irrecoverable Cheetah+ parity error.");
6361 }
6362
6363 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6364 smp_processor_id(),
6365 (type & 0x1) ? 'I' : 'D',
6366 regs->tpc);
6367 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6368 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6369 }
6370
6371 struct sun4v_error_entry {
6372 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6373
6374 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6375 regs->tpc, tl);
6376 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6377 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6378 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6379 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6380 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6381 (void *) regs->u_regs[UREG_I7]);
6382 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6383 "pte[%lx] error[%lx]\n",
6384 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6385
6386 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6387 regs->tpc, tl);
6388 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6389 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6390 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6391 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6392 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6393 (void *) regs->u_regs[UREG_I7]);
6394 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6395 "pte[%lx] error[%lx]\n",
6396 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6397 fp = (unsigned long)sf->fp + STACK_BIAS;
6398 }
6399
6400 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6401 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6402 } while (++count < 16);
6403 }
6404
6405 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6406 return (struct reg_window *) (fp + STACK_BIAS);
6407 }
6408
6409 +extern void gr_handle_kernel_exploit(void);
6410 +
6411 void die_if_kernel(char *str, struct pt_regs *regs)
6412 {
6413 static int die_counter;
6414 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6415 while (rw &&
6416 count++ < 30&&
6417 is_kernel_stack(current, rw)) {
6418 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
6419 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
6420 (void *) rw->ins[7]);
6421
6422 rw = kernel_stack_up(rw);
6423 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6424 }
6425 user_instruction_dump ((unsigned int __user *) regs->tpc);
6426 }
6427 - if (regs->tstate & TSTATE_PRIV)
6428 + if (regs->tstate & TSTATE_PRIV) {
6429 + gr_handle_kernel_exploit();
6430 do_exit(SIGKILL);
6431 + }
6432 +
6433 do_exit(SIGSEGV);
6434 }
6435 EXPORT_SYMBOL(die_if_kernel);
6436 diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
6437 index be183fe..1c8d332 100644
6438 --- a/arch/sparc/kernel/una_asm_64.S
6439 +++ b/arch/sparc/kernel/una_asm_64.S
6440 @@ -127,7 +127,7 @@ do_int_load:
6441 wr %o5, 0x0, %asi
6442 retl
6443 mov 0, %o0
6444 - .size __do_int_load, .-__do_int_load
6445 + .size do_int_load, .-do_int_load
6446
6447 .section __ex_table,"a"
6448 .word 4b, __retl_efault
6449 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6450 index 3792099..2af17d8 100644
6451 --- a/arch/sparc/kernel/unaligned_64.c
6452 +++ b/arch/sparc/kernel/unaligned_64.c
6453 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
6454 if (count < 5) {
6455 last_time = jiffies;
6456 count++;
6457 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
6458 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
6459 regs->tpc, (void *) regs->tpc);
6460 }
6461 }
6462 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6463 index e75faf0..24f12f9 100644
6464 --- a/arch/sparc/lib/Makefile
6465 +++ b/arch/sparc/lib/Makefile
6466 @@ -2,7 +2,7 @@
6467 #
6468
6469 asflags-y := -ansi -DST_DIV0=0x02
6470 -ccflags-y := -Werror
6471 +#ccflags-y := -Werror
6472
6473 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6474 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6475 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6476 index 0268210..f0291ca 100644
6477 --- a/arch/sparc/lib/atomic_64.S
6478 +++ b/arch/sparc/lib/atomic_64.S
6479 @@ -18,7 +18,12 @@
6480 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6481 BACKOFF_SETUP(%o2)
6482 1: lduw [%o1], %g1
6483 - add %g1, %o0, %g7
6484 + addcc %g1, %o0, %g7
6485 +
6486 +#ifdef CONFIG_PAX_REFCOUNT
6487 + tvs %icc, 6
6488 +#endif
6489 +
6490 cas [%o1], %g1, %g7
6491 cmp %g1, %g7
6492 bne,pn %icc, 2f
6493 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6494 2: BACKOFF_SPIN(%o2, %o3, 1b)
6495 .size atomic_add, .-atomic_add
6496
6497 + .globl atomic_add_unchecked
6498 + .type atomic_add_unchecked,#function
6499 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6500 + BACKOFF_SETUP(%o2)
6501 +1: lduw [%o1], %g1
6502 + add %g1, %o0, %g7
6503 + cas [%o1], %g1, %g7
6504 + cmp %g1, %g7
6505 + bne,pn %icc, 2f
6506 + nop
6507 + retl
6508 + nop
6509 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6510 + .size atomic_add_unchecked, .-atomic_add_unchecked
6511 +
6512 .globl atomic_sub
6513 .type atomic_sub,#function
6514 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6515 BACKOFF_SETUP(%o2)
6516 1: lduw [%o1], %g1
6517 - sub %g1, %o0, %g7
6518 + subcc %g1, %o0, %g7
6519 +
6520 +#ifdef CONFIG_PAX_REFCOUNT
6521 + tvs %icc, 6
6522 +#endif
6523 +
6524 cas [%o1], %g1, %g7
6525 cmp %g1, %g7
6526 bne,pn %icc, 2f
6527 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6528 2: BACKOFF_SPIN(%o2, %o3, 1b)
6529 .size atomic_sub, .-atomic_sub
6530
6531 + .globl atomic_sub_unchecked
6532 + .type atomic_sub_unchecked,#function
6533 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6534 + BACKOFF_SETUP(%o2)
6535 +1: lduw [%o1], %g1
6536 + sub %g1, %o0, %g7
6537 + cas [%o1], %g1, %g7
6538 + cmp %g1, %g7
6539 + bne,pn %icc, 2f
6540 + nop
6541 + retl
6542 + nop
6543 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6544 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
6545 +
6546 .globl atomic_add_ret
6547 .type atomic_add_ret,#function
6548 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6549 BACKOFF_SETUP(%o2)
6550 1: lduw [%o1], %g1
6551 - add %g1, %o0, %g7
6552 + addcc %g1, %o0, %g7
6553 +
6554 +#ifdef CONFIG_PAX_REFCOUNT
6555 + tvs %icc, 6
6556 +#endif
6557 +
6558 cas [%o1], %g1, %g7
6559 cmp %g1, %g7
6560 bne,pn %icc, 2f
6561 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6562 2: BACKOFF_SPIN(%o2, %o3, 1b)
6563 .size atomic_add_ret, .-atomic_add_ret
6564
6565 + .globl atomic_add_ret_unchecked
6566 + .type atomic_add_ret_unchecked,#function
6567 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6568 + BACKOFF_SETUP(%o2)
6569 +1: lduw [%o1], %g1
6570 + addcc %g1, %o0, %g7
6571 + cas [%o1], %g1, %g7
6572 + cmp %g1, %g7
6573 + bne,pn %icc, 2f
6574 + add %g7, %o0, %g7
6575 + sra %g7, 0, %o0
6576 + retl
6577 + nop
6578 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6579 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6580 +
6581 .globl atomic_sub_ret
6582 .type atomic_sub_ret,#function
6583 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6584 BACKOFF_SETUP(%o2)
6585 1: lduw [%o1], %g1
6586 - sub %g1, %o0, %g7
6587 + subcc %g1, %o0, %g7
6588 +
6589 +#ifdef CONFIG_PAX_REFCOUNT
6590 + tvs %icc, 6
6591 +#endif
6592 +
6593 cas [%o1], %g1, %g7
6594 cmp %g1, %g7
6595 bne,pn %icc, 2f
6596 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6597 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6598 BACKOFF_SETUP(%o2)
6599 1: ldx [%o1], %g1
6600 - add %g1, %o0, %g7
6601 + addcc %g1, %o0, %g7
6602 +
6603 +#ifdef CONFIG_PAX_REFCOUNT
6604 + tvs %xcc, 6
6605 +#endif
6606 +
6607 casx [%o1], %g1, %g7
6608 cmp %g1, %g7
6609 bne,pn %xcc, 2f
6610 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6611 2: BACKOFF_SPIN(%o2, %o3, 1b)
6612 .size atomic64_add, .-atomic64_add
6613
6614 + .globl atomic64_add_unchecked
6615 + .type atomic64_add_unchecked,#function
6616 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6617 + BACKOFF_SETUP(%o2)
6618 +1: ldx [%o1], %g1
6619 + addcc %g1, %o0, %g7
6620 + casx [%o1], %g1, %g7
6621 + cmp %g1, %g7
6622 + bne,pn %xcc, 2f
6623 + nop
6624 + retl
6625 + nop
6626 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6627 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
6628 +
6629 .globl atomic64_sub
6630 .type atomic64_sub,#function
6631 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6632 BACKOFF_SETUP(%o2)
6633 1: ldx [%o1], %g1
6634 - sub %g1, %o0, %g7
6635 + subcc %g1, %o0, %g7
6636 +
6637 +#ifdef CONFIG_PAX_REFCOUNT
6638 + tvs %xcc, 6
6639 +#endif
6640 +
6641 casx [%o1], %g1, %g7
6642 cmp %g1, %g7
6643 bne,pn %xcc, 2f
6644 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6645 2: BACKOFF_SPIN(%o2, %o3, 1b)
6646 .size atomic64_sub, .-atomic64_sub
6647
6648 + .globl atomic64_sub_unchecked
6649 + .type atomic64_sub_unchecked,#function
6650 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6651 + BACKOFF_SETUP(%o2)
6652 +1: ldx [%o1], %g1
6653 + subcc %g1, %o0, %g7
6654 + casx [%o1], %g1, %g7
6655 + cmp %g1, %g7
6656 + bne,pn %xcc, 2f
6657 + nop
6658 + retl
6659 + nop
6660 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6661 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6662 +
6663 .globl atomic64_add_ret
6664 .type atomic64_add_ret,#function
6665 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6666 BACKOFF_SETUP(%o2)
6667 1: ldx [%o1], %g1
6668 - add %g1, %o0, %g7
6669 + addcc %g1, %o0, %g7
6670 +
6671 +#ifdef CONFIG_PAX_REFCOUNT
6672 + tvs %xcc, 6
6673 +#endif
6674 +
6675 casx [%o1], %g1, %g7
6676 cmp %g1, %g7
6677 bne,pn %xcc, 2f
6678 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6679 2: BACKOFF_SPIN(%o2, %o3, 1b)
6680 .size atomic64_add_ret, .-atomic64_add_ret
6681
6682 + .globl atomic64_add_ret_unchecked
6683 + .type atomic64_add_ret_unchecked,#function
6684 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6685 + BACKOFF_SETUP(%o2)
6686 +1: ldx [%o1], %g1
6687 + addcc %g1, %o0, %g7
6688 + casx [%o1], %g1, %g7
6689 + cmp %g1, %g7
6690 + bne,pn %xcc, 2f
6691 + add %g7, %o0, %g7
6692 + mov %g7, %o0
6693 + retl
6694 + nop
6695 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6696 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6697 +
6698 .globl atomic64_sub_ret
6699 .type atomic64_sub_ret,#function
6700 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6701 BACKOFF_SETUP(%o2)
6702 1: ldx [%o1], %g1
6703 - sub %g1, %o0, %g7
6704 + subcc %g1, %o0, %g7
6705 +
6706 +#ifdef CONFIG_PAX_REFCOUNT
6707 + tvs %xcc, 6
6708 +#endif
6709 +
6710 casx [%o1], %g1, %g7
6711 cmp %g1, %g7
6712 bne,pn %xcc, 2f
6713 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6714 index 704b126..2e79d76 100644
6715 --- a/arch/sparc/lib/ksyms.c
6716 +++ b/arch/sparc/lib/ksyms.c
6717 @@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
6718
6719 /* Atomic counter implementation. */
6720 EXPORT_SYMBOL(atomic_add);
6721 +EXPORT_SYMBOL(atomic_add_unchecked);
6722 EXPORT_SYMBOL(atomic_add_ret);
6723 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
6724 EXPORT_SYMBOL(atomic_sub);
6725 +EXPORT_SYMBOL(atomic_sub_unchecked);
6726 EXPORT_SYMBOL(atomic_sub_ret);
6727 EXPORT_SYMBOL(atomic64_add);
6728 +EXPORT_SYMBOL(atomic64_add_unchecked);
6729 EXPORT_SYMBOL(atomic64_add_ret);
6730 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6731 EXPORT_SYMBOL(atomic64_sub);
6732 +EXPORT_SYMBOL(atomic64_sub_unchecked);
6733 EXPORT_SYMBOL(atomic64_sub_ret);
6734
6735 /* Atomic bit operations. */
6736 diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
6737 index 91a7d29..ce75c29 100644
6738 --- a/arch/sparc/lib/rwsem_64.S
6739 +++ b/arch/sparc/lib/rwsem_64.S
6740 @@ -11,7 +11,12 @@
6741 .globl __down_read
6742 __down_read:
6743 1: lduw [%o0], %g1
6744 - add %g1, 1, %g7
6745 + addcc %g1, 1, %g7
6746 +
6747 +#ifdef CONFIG_PAX_REFCOUNT
6748 + tvs %icc, 6
6749 +#endif
6750 +
6751 cas [%o0], %g1, %g7
6752 cmp %g1, %g7
6753 bne,pn %icc, 1b
6754 @@ -33,7 +38,12 @@ __down_read:
6755 .globl __down_read_trylock
6756 __down_read_trylock:
6757 1: lduw [%o0], %g1
6758 - add %g1, 1, %g7
6759 + addcc %g1, 1, %g7
6760 +
6761 +#ifdef CONFIG_PAX_REFCOUNT
6762 + tvs %icc, 6
6763 +#endif
6764 +
6765 cmp %g7, 0
6766 bl,pn %icc, 2f
6767 mov 0, %o1
6768 @@ -51,7 +61,12 @@ __down_write:
6769 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
6770 1:
6771 lduw [%o0], %g3
6772 - add %g3, %g1, %g7
6773 + addcc %g3, %g1, %g7
6774 +
6775 +#ifdef CONFIG_PAX_REFCOUNT
6776 + tvs %icc, 6
6777 +#endif
6778 +
6779 cas [%o0], %g3, %g7
6780 cmp %g3, %g7
6781 bne,pn %icc, 1b
6782 @@ -77,7 +92,12 @@ __down_write_trylock:
6783 cmp %g3, 0
6784 bne,pn %icc, 2f
6785 mov 0, %o1
6786 - add %g3, %g1, %g7
6787 + addcc %g3, %g1, %g7
6788 +
6789 +#ifdef CONFIG_PAX_REFCOUNT
6790 + tvs %icc, 6
6791 +#endif
6792 +
6793 cas [%o0], %g3, %g7
6794 cmp %g3, %g7
6795 bne,pn %icc, 1b
6796 @@ -90,7 +110,12 @@ __down_write_trylock:
6797 __up_read:
6798 1:
6799 lduw [%o0], %g1
6800 - sub %g1, 1, %g7
6801 + subcc %g1, 1, %g7
6802 +
6803 +#ifdef CONFIG_PAX_REFCOUNT
6804 + tvs %icc, 6
6805 +#endif
6806 +
6807 cas [%o0], %g1, %g7
6808 cmp %g1, %g7
6809 bne,pn %icc, 1b
6810 @@ -118,7 +143,12 @@ __up_write:
6811 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
6812 1:
6813 lduw [%o0], %g3
6814 - sub %g3, %g1, %g7
6815 + subcc %g3, %g1, %g7
6816 +
6817 +#ifdef CONFIG_PAX_REFCOUNT
6818 + tvs %icc, 6
6819 +#endif
6820 +
6821 cas [%o0], %g3, %g7
6822 cmp %g3, %g7
6823 bne,pn %icc, 1b
6824 @@ -143,7 +173,12 @@ __downgrade_write:
6825 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
6826 1:
6827 lduw [%o0], %g3
6828 - sub %g3, %g1, %g7
6829 + subcc %g3, %g1, %g7
6830 +
6831 +#ifdef CONFIG_PAX_REFCOUNT
6832 + tvs %icc, 6
6833 +#endif
6834 +
6835 cas [%o0], %g3, %g7
6836 cmp %g3, %g7
6837 bne,pn %icc, 1b
6838 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6839 index 79836a7..62f47a2 100644
6840 --- a/arch/sparc/mm/Makefile
6841 +++ b/arch/sparc/mm/Makefile
6842 @@ -2,7 +2,7 @@
6843 #
6844
6845 asflags-y := -ansi
6846 -ccflags-y := -Werror
6847 +#ccflags-y := -Werror
6848
6849 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6850 obj-y += fault_$(BITS).o
6851 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6852 index b99f81c..3453e93 100644
6853 --- a/arch/sparc/mm/fault_32.c
6854 +++ b/arch/sparc/mm/fault_32.c
6855 @@ -21,6 +21,9 @@
6856 #include <linux/interrupt.h>
6857 #include <linux/module.h>
6858 #include <linux/kdebug.h>
6859 +#include <linux/slab.h>
6860 +#include <linux/pagemap.h>
6861 +#include <linux/compiler.h>
6862
6863 #include <asm/system.h>
6864 #include <asm/page.h>
6865 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6866 return safe_compute_effective_address(regs, insn);
6867 }
6868
6869 +#ifdef CONFIG_PAX_PAGEEXEC
6870 +#ifdef CONFIG_PAX_DLRESOLVE
6871 +static void pax_emuplt_close(struct vm_area_struct *vma)
6872 +{
6873 + vma->vm_mm->call_dl_resolve = 0UL;
6874 +}
6875 +
6876 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6877 +{
6878 + unsigned int *kaddr;
6879 +
6880 + vmf->page = alloc_page(GFP_HIGHUSER);
6881 + if (!vmf->page)
6882 + return VM_FAULT_OOM;
6883 +
6884 + kaddr = kmap(vmf->page);
6885 + memset(kaddr, 0, PAGE_SIZE);
6886 + kaddr[0] = 0x9DE3BFA8U; /* save */
6887 + flush_dcache_page(vmf->page);
6888 + kunmap(vmf->page);
6889 + return VM_FAULT_MAJOR;
6890 +}
6891 +
6892 +static const struct vm_operations_struct pax_vm_ops = {
6893 + .close = pax_emuplt_close,
6894 + .fault = pax_emuplt_fault
6895 +};
6896 +
6897 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6898 +{
6899 + int ret;
6900 +
6901 + vma->vm_mm = current->mm;
6902 + vma->vm_start = addr;
6903 + vma->vm_end = addr + PAGE_SIZE;
6904 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6905 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6906 + vma->vm_ops = &pax_vm_ops;
6907 +
6908 + ret = insert_vm_struct(current->mm, vma);
6909 + if (ret)
6910 + return ret;
6911 +
6912 + ++current->mm->total_vm;
6913 + return 0;
6914 +}
6915 +#endif
6916 +
6917 +/*
6918 + * PaX: decide what to do with offenders (regs->pc = fault address)
6919 + *
6920 + * returns 1 when task should be killed
6921 + * 2 when patched PLT trampoline was detected
6922 + * 3 when unpatched PLT trampoline was detected
6923 + */
6924 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6925 +{
6926 +
6927 +#ifdef CONFIG_PAX_EMUPLT
6928 + int err;
6929 +
6930 + do { /* PaX: patched PLT emulation #1 */
6931 + unsigned int sethi1, sethi2, jmpl;
6932 +
6933 + err = get_user(sethi1, (unsigned int *)regs->pc);
6934 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6935 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6936 +
6937 + if (err)
6938 + break;
6939 +
6940 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6941 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6942 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6943 + {
6944 + unsigned int addr;
6945 +
6946 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6947 + addr = regs->u_regs[UREG_G1];
6948 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6949 + regs->pc = addr;
6950 + regs->npc = addr+4;
6951 + return 2;
6952 + }
6953 + } while (0);
6954 +
6955 + { /* PaX: patched PLT emulation #2 */
6956 + unsigned int ba;
6957 +
6958 + err = get_user(ba, (unsigned int *)regs->pc);
6959 +
6960 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6961 + unsigned int addr;
6962 +
6963 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6964 + regs->pc = addr;
6965 + regs->npc = addr+4;
6966 + return 2;
6967 + }
6968 + }
6969 +
6970 + do { /* PaX: patched PLT emulation #3 */
6971 + unsigned int sethi, jmpl, nop;
6972 +
6973 + err = get_user(sethi, (unsigned int *)regs->pc);
6974 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6975 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6976 +
6977 + if (err)
6978 + break;
6979 +
6980 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6981 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6982 + nop == 0x01000000U)
6983 + {
6984 + unsigned int addr;
6985 +
6986 + addr = (sethi & 0x003FFFFFU) << 10;
6987 + regs->u_regs[UREG_G1] = addr;
6988 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6989 + regs->pc = addr;
6990 + regs->npc = addr+4;
6991 + return 2;
6992 + }
6993 + } while (0);
6994 +
6995 + do { /* PaX: unpatched PLT emulation step 1 */
6996 + unsigned int sethi, ba, nop;
6997 +
6998 + err = get_user(sethi, (unsigned int *)regs->pc);
6999 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
7000 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
7001 +
7002 + if (err)
7003 + break;
7004 +
7005 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7006 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7007 + nop == 0x01000000U)
7008 + {
7009 + unsigned int addr, save, call;
7010 +
7011 + if ((ba & 0xFFC00000U) == 0x30800000U)
7012 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
7013 + else
7014 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7015 +
7016 + err = get_user(save, (unsigned int *)addr);
7017 + err |= get_user(call, (unsigned int *)(addr+4));
7018 + err |= get_user(nop, (unsigned int *)(addr+8));
7019 + if (err)
7020 + break;
7021 +
7022 +#ifdef CONFIG_PAX_DLRESOLVE
7023 + if (save == 0x9DE3BFA8U &&
7024 + (call & 0xC0000000U) == 0x40000000U &&
7025 + nop == 0x01000000U)
7026 + {
7027 + struct vm_area_struct *vma;
7028 + unsigned long call_dl_resolve;
7029 +
7030 + down_read(&current->mm->mmap_sem);
7031 + call_dl_resolve = current->mm->call_dl_resolve;
7032 + up_read(&current->mm->mmap_sem);
7033 + if (likely(call_dl_resolve))
7034 + goto emulate;
7035 +
7036 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7037 +
7038 + down_write(&current->mm->mmap_sem);
7039 + if (current->mm->call_dl_resolve) {
7040 + call_dl_resolve = current->mm->call_dl_resolve;
7041 + up_write(&current->mm->mmap_sem);
7042 + if (vma)
7043 + kmem_cache_free(vm_area_cachep, vma);
7044 + goto emulate;
7045 + }
7046 +
7047 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7048 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7049 + up_write(&current->mm->mmap_sem);
7050 + if (vma)
7051 + kmem_cache_free(vm_area_cachep, vma);
7052 + return 1;
7053 + }
7054 +
7055 + if (pax_insert_vma(vma, call_dl_resolve)) {
7056 + up_write(&current->mm->mmap_sem);
7057 + kmem_cache_free(vm_area_cachep, vma);
7058 + return 1;
7059 + }
7060 +
7061 + current->mm->call_dl_resolve = call_dl_resolve;
7062 + up_write(&current->mm->mmap_sem);
7063 +
7064 +emulate:
7065 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7066 + regs->pc = call_dl_resolve;
7067 + regs->npc = addr+4;
7068 + return 3;
7069 + }
7070 +#endif
7071 +
7072 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7073 + if ((save & 0xFFC00000U) == 0x05000000U &&
7074 + (call & 0xFFFFE000U) == 0x85C0A000U &&
7075 + nop == 0x01000000U)
7076 + {
7077 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7078 + regs->u_regs[UREG_G2] = addr + 4;
7079 + addr = (save & 0x003FFFFFU) << 10;
7080 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7081 + regs->pc = addr;
7082 + regs->npc = addr+4;
7083 + return 3;
7084 + }
7085 + }
7086 + } while (0);
7087 +
7088 + do { /* PaX: unpatched PLT emulation step 2 */
7089 + unsigned int save, call, nop;
7090 +
7091 + err = get_user(save, (unsigned int *)(regs->pc-4));
7092 + err |= get_user(call, (unsigned int *)regs->pc);
7093 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
7094 + if (err)
7095 + break;
7096 +
7097 + if (save == 0x9DE3BFA8U &&
7098 + (call & 0xC0000000U) == 0x40000000U &&
7099 + nop == 0x01000000U)
7100 + {
7101 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
7102 +
7103 + regs->u_regs[UREG_RETPC] = regs->pc;
7104 + regs->pc = dl_resolve;
7105 + regs->npc = dl_resolve+4;
7106 + return 3;
7107 + }
7108 + } while (0);
7109 +#endif
7110 +
7111 + return 1;
7112 +}
7113 +
7114 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7115 +{
7116 + unsigned long i;
7117 +
7118 + printk(KERN_ERR "PAX: bytes at PC: ");
7119 + for (i = 0; i < 8; i++) {
7120 + unsigned int c;
7121 + if (get_user(c, (unsigned int *)pc+i))
7122 + printk(KERN_CONT "???????? ");
7123 + else
7124 + printk(KERN_CONT "%08x ", c);
7125 + }
7126 + printk("\n");
7127 +}
7128 +#endif
7129 +
7130 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
7131 unsigned long address)
7132 {
7133 @@ -231,6 +495,24 @@ good_area:
7134 if(!(vma->vm_flags & VM_WRITE))
7135 goto bad_area;
7136 } else {
7137 +
7138 +#ifdef CONFIG_PAX_PAGEEXEC
7139 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
7140 + up_read(&mm->mmap_sem);
7141 + switch (pax_handle_fetch_fault(regs)) {
7142 +
7143 +#ifdef CONFIG_PAX_EMUPLT
7144 + case 2:
7145 + case 3:
7146 + return;
7147 +#endif
7148 +
7149 + }
7150 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
7151 + do_group_exit(SIGKILL);
7152 + }
7153 +#endif
7154 +
7155 /* Allow reads even for write-only mappings */
7156 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
7157 goto bad_area;
7158 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
7159 index 43b0da9..a0b78f9 100644
7160 --- a/arch/sparc/mm/fault_64.c
7161 +++ b/arch/sparc/mm/fault_64.c
7162 @@ -20,6 +20,9 @@
7163 #include <linux/kprobes.h>
7164 #include <linux/kdebug.h>
7165 #include <linux/percpu.h>
7166 +#include <linux/slab.h>
7167 +#include <linux/pagemap.h>
7168 +#include <linux/compiler.h>
7169
7170 #include <asm/page.h>
7171 #include <asm/pgtable.h>
7172 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
7173 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
7174 regs->tpc);
7175 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
7176 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
7177 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
7178 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
7179 dump_stack();
7180 unhandled_fault(regs->tpc, current, regs);
7181 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
7182 show_regs(regs);
7183 }
7184
7185 +#ifdef CONFIG_PAX_PAGEEXEC
7186 +#ifdef CONFIG_PAX_DLRESOLVE
7187 +static void pax_emuplt_close(struct vm_area_struct *vma)
7188 +{
7189 + vma->vm_mm->call_dl_resolve = 0UL;
7190 +}
7191 +
7192 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7193 +{
7194 + unsigned int *kaddr;
7195 +
7196 + vmf->page = alloc_page(GFP_HIGHUSER);
7197 + if (!vmf->page)
7198 + return VM_FAULT_OOM;
7199 +
7200 + kaddr = kmap(vmf->page);
7201 + memset(kaddr, 0, PAGE_SIZE);
7202 + kaddr[0] = 0x9DE3BFA8U; /* save */
7203 + flush_dcache_page(vmf->page);
7204 + kunmap(vmf->page);
7205 + return VM_FAULT_MAJOR;
7206 +}
7207 +
7208 +static const struct vm_operations_struct pax_vm_ops = {
7209 + .close = pax_emuplt_close,
7210 + .fault = pax_emuplt_fault
7211 +};
7212 +
7213 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
7214 +{
7215 + int ret;
7216 +
7217 + vma->vm_mm = current->mm;
7218 + vma->vm_start = addr;
7219 + vma->vm_end = addr + PAGE_SIZE;
7220 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
7221 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
7222 + vma->vm_ops = &pax_vm_ops;
7223 +
7224 + ret = insert_vm_struct(current->mm, vma);
7225 + if (ret)
7226 + return ret;
7227 +
7228 + ++current->mm->total_vm;
7229 + return 0;
7230 +}
7231 +#endif
7232 +
7233 +/*
7234 + * PaX: decide what to do with offenders (regs->tpc = fault address)
7235 + *
7236 + * returns 1 when task should be killed
7237 + * 2 when patched PLT trampoline was detected
7238 + * 3 when unpatched PLT trampoline was detected
7239 + */
7240 +static int pax_handle_fetch_fault(struct pt_regs *regs)
7241 +{
7242 +
7243 +#ifdef CONFIG_PAX_EMUPLT
7244 + int err;
7245 +
7246 + do { /* PaX: patched PLT emulation #1 */
7247 + unsigned int sethi1, sethi2, jmpl;
7248 +
7249 + err = get_user(sethi1, (unsigned int *)regs->tpc);
7250 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
7251 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
7252 +
7253 + if (err)
7254 + break;
7255 +
7256 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
7257 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
7258 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
7259 + {
7260 + unsigned long addr;
7261 +
7262 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
7263 + addr = regs->u_regs[UREG_G1];
7264 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7265 +
7266 + if (test_thread_flag(TIF_32BIT))
7267 + addr &= 0xFFFFFFFFUL;
7268 +
7269 + regs->tpc = addr;
7270 + regs->tnpc = addr+4;
7271 + return 2;
7272 + }
7273 + } while (0);
7274 +
7275 + { /* PaX: patched PLT emulation #2 */
7276 + unsigned int ba;
7277 +
7278 + err = get_user(ba, (unsigned int *)regs->tpc);
7279 +
7280 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
7281 + unsigned long addr;
7282 +
7283 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7284 +
7285 + if (test_thread_flag(TIF_32BIT))
7286 + addr &= 0xFFFFFFFFUL;
7287 +
7288 + regs->tpc = addr;
7289 + regs->tnpc = addr+4;
7290 + return 2;
7291 + }
7292 + }
7293 +
7294 + do { /* PaX: patched PLT emulation #3 */
7295 + unsigned int sethi, jmpl, nop;
7296 +
7297 + err = get_user(sethi, (unsigned int *)regs->tpc);
7298 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
7299 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7300 +
7301 + if (err)
7302 + break;
7303 +
7304 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7305 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
7306 + nop == 0x01000000U)
7307 + {
7308 + unsigned long addr;
7309 +
7310 + addr = (sethi & 0x003FFFFFU) << 10;
7311 + regs->u_regs[UREG_G1] = addr;
7312 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7313 +
7314 + if (test_thread_flag(TIF_32BIT))
7315 + addr &= 0xFFFFFFFFUL;
7316 +
7317 + regs->tpc = addr;
7318 + regs->tnpc = addr+4;
7319 + return 2;
7320 + }
7321 + } while (0);
7322 +
7323 + do { /* PaX: patched PLT emulation #4 */
7324 + unsigned int sethi, mov1, call, mov2;
7325 +
7326 + err = get_user(sethi, (unsigned int *)regs->tpc);
7327 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
7328 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
7329 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
7330 +
7331 + if (err)
7332 + break;
7333 +
7334 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7335 + mov1 == 0x8210000FU &&
7336 + (call & 0xC0000000U) == 0x40000000U &&
7337 + mov2 == 0x9E100001U)
7338 + {
7339 + unsigned long addr;
7340 +
7341 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
7342 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7343 +
7344 + if (test_thread_flag(TIF_32BIT))
7345 + addr &= 0xFFFFFFFFUL;
7346 +
7347 + regs->tpc = addr;
7348 + regs->tnpc = addr+4;
7349 + return 2;
7350 + }
7351 + } while (0);
7352 +
7353 + do { /* PaX: patched PLT emulation #5 */
7354 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
7355 +
7356 + err = get_user(sethi, (unsigned int *)regs->tpc);
7357 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7358 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7359 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7360 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7361 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7362 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7363 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7364 +
7365 + if (err)
7366 + break;
7367 +
7368 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7369 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7370 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7371 + (or1 & 0xFFFFE000U) == 0x82106000U &&
7372 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7373 + sllx == 0x83287020U &&
7374 + jmpl == 0x81C04005U &&
7375 + nop == 0x01000000U)
7376 + {
7377 + unsigned long addr;
7378 +
7379 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7380 + regs->u_regs[UREG_G1] <<= 32;
7381 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7382 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7383 + regs->tpc = addr;
7384 + regs->tnpc = addr+4;
7385 + return 2;
7386 + }
7387 + } while (0);
7388 +
7389 + do { /* PaX: patched PLT emulation #6 */
7390 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7391 +
7392 + err = get_user(sethi, (unsigned int *)regs->tpc);
7393 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7394 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7395 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7396 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
7397 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7398 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7399 +
7400 + if (err)
7401 + break;
7402 +
7403 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7404 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7405 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7406 + sllx == 0x83287020U &&
7407 + (or & 0xFFFFE000U) == 0x8A116000U &&
7408 + jmpl == 0x81C04005U &&
7409 + nop == 0x01000000U)
7410 + {
7411 + unsigned long addr;
7412 +
7413 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7414 + regs->u_regs[UREG_G1] <<= 32;
7415 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7416 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7417 + regs->tpc = addr;
7418 + regs->tnpc = addr+4;
7419 + return 2;
7420 + }
7421 + } while (0);
7422 +
7423 + do { /* PaX: unpatched PLT emulation step 1 */
7424 + unsigned int sethi, ba, nop;
7425 +
7426 + err = get_user(sethi, (unsigned int *)regs->tpc);
7427 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7428 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7429 +
7430 + if (err)
7431 + break;
7432 +
7433 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7434 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7435 + nop == 0x01000000U)
7436 + {
7437 + unsigned long addr;
7438 + unsigned int save, call;
7439 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7440 +
7441 + if ((ba & 0xFFC00000U) == 0x30800000U)
7442 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7443 + else
7444 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7445 +
7446 + if (test_thread_flag(TIF_32BIT))
7447 + addr &= 0xFFFFFFFFUL;
7448 +
7449 + err = get_user(save, (unsigned int *)addr);
7450 + err |= get_user(call, (unsigned int *)(addr+4));
7451 + err |= get_user(nop, (unsigned int *)(addr+8));
7452 + if (err)
7453 + break;
7454 +
7455 +#ifdef CONFIG_PAX_DLRESOLVE
7456 + if (save == 0x9DE3BFA8U &&
7457 + (call & 0xC0000000U) == 0x40000000U &&
7458 + nop == 0x01000000U)
7459 + {
7460 + struct vm_area_struct *vma;
7461 + unsigned long call_dl_resolve;
7462 +
7463 + down_read(&current->mm->mmap_sem);
7464 + call_dl_resolve = current->mm->call_dl_resolve;
7465 + up_read(&current->mm->mmap_sem);
7466 + if (likely(call_dl_resolve))
7467 + goto emulate;
7468 +
7469 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7470 +
7471 + down_write(&current->mm->mmap_sem);
7472 + if (current->mm->call_dl_resolve) {
7473 + call_dl_resolve = current->mm->call_dl_resolve;
7474 + up_write(&current->mm->mmap_sem);
7475 + if (vma)
7476 + kmem_cache_free(vm_area_cachep, vma);
7477 + goto emulate;
7478 + }
7479 +
7480 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7481 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7482 + up_write(&current->mm->mmap_sem);
7483 + if (vma)
7484 + kmem_cache_free(vm_area_cachep, vma);
7485 + return 1;
7486 + }
7487 +
7488 + if (pax_insert_vma(vma, call_dl_resolve)) {
7489 + up_write(&current->mm->mmap_sem);
7490 + kmem_cache_free(vm_area_cachep, vma);
7491 + return 1;
7492 + }
7493 +
7494 + current->mm->call_dl_resolve = call_dl_resolve;
7495 + up_write(&current->mm->mmap_sem);
7496 +
7497 +emulate:
7498 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7499 + regs->tpc = call_dl_resolve;
7500 + regs->tnpc = addr+4;
7501 + return 3;
7502 + }
7503 +#endif
7504 +
7505 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7506 + if ((save & 0xFFC00000U) == 0x05000000U &&
7507 + (call & 0xFFFFE000U) == 0x85C0A000U &&
7508 + nop == 0x01000000U)
7509 + {
7510 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7511 + regs->u_regs[UREG_G2] = addr + 4;
7512 + addr = (save & 0x003FFFFFU) << 10;
7513 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7514 +
7515 + if (test_thread_flag(TIF_32BIT))
7516 + addr &= 0xFFFFFFFFUL;
7517 +
7518 + regs->tpc = addr;
7519 + regs->tnpc = addr+4;
7520 + return 3;
7521 + }
7522 +
7523 + /* PaX: 64-bit PLT stub */
7524 + err = get_user(sethi1, (unsigned int *)addr);
7525 + err |= get_user(sethi2, (unsigned int *)(addr+4));
7526 + err |= get_user(or1, (unsigned int *)(addr+8));
7527 + err |= get_user(or2, (unsigned int *)(addr+12));
7528 + err |= get_user(sllx, (unsigned int *)(addr+16));
7529 + err |= get_user(add, (unsigned int *)(addr+20));
7530 + err |= get_user(jmpl, (unsigned int *)(addr+24));
7531 + err |= get_user(nop, (unsigned int *)(addr+28));
7532 + if (err)
7533 + break;
7534 +
7535 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7536 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7537 + (or1 & 0xFFFFE000U) == 0x88112000U &&
7538 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7539 + sllx == 0x89293020U &&
7540 + add == 0x8A010005U &&
7541 + jmpl == 0x89C14000U &&
7542 + nop == 0x01000000U)
7543 + {
7544 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7545 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7546 + regs->u_regs[UREG_G4] <<= 32;
7547 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7548 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7549 + regs->u_regs[UREG_G4] = addr + 24;
7550 + addr = regs->u_regs[UREG_G5];
7551 + regs->tpc = addr;
7552 + regs->tnpc = addr+4;
7553 + return 3;
7554 + }
7555 + }
7556 + } while (0);
7557 +
7558 +#ifdef CONFIG_PAX_DLRESOLVE
7559 + do { /* PaX: unpatched PLT emulation step 2 */
7560 + unsigned int save, call, nop;
7561 +
7562 + err = get_user(save, (unsigned int *)(regs->tpc-4));
7563 + err |= get_user(call, (unsigned int *)regs->tpc);
7564 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7565 + if (err)
7566 + break;
7567 +
7568 + if (save == 0x9DE3BFA8U &&
7569 + (call & 0xC0000000U) == 0x40000000U &&
7570 + nop == 0x01000000U)
7571 + {
7572 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7573 +
7574 + if (test_thread_flag(TIF_32BIT))
7575 + dl_resolve &= 0xFFFFFFFFUL;
7576 +
7577 + regs->u_regs[UREG_RETPC] = regs->tpc;
7578 + regs->tpc = dl_resolve;
7579 + regs->tnpc = dl_resolve+4;
7580 + return 3;
7581 + }
7582 + } while (0);
7583 +#endif
7584 +
7585 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7586 + unsigned int sethi, ba, nop;
7587 +
7588 + err = get_user(sethi, (unsigned int *)regs->tpc);
7589 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7590 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7591 +
7592 + if (err)
7593 + break;
7594 +
7595 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7596 + (ba & 0xFFF00000U) == 0x30600000U &&
7597 + nop == 0x01000000U)
7598 + {
7599 + unsigned long addr;
7600 +
7601 + addr = (sethi & 0x003FFFFFU) << 10;
7602 + regs->u_regs[UREG_G1] = addr;
7603 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7604 +
7605 + if (test_thread_flag(TIF_32BIT))
7606 + addr &= 0xFFFFFFFFUL;
7607 +
7608 + regs->tpc = addr;
7609 + regs->tnpc = addr+4;
7610 + return 2;
7611 + }
7612 + } while (0);
7613 +
7614 +#endif
7615 +
7616 + return 1;
7617 +}
7618 +
7619 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7620 +{
7621 + unsigned long i;
7622 +
7623 + printk(KERN_ERR "PAX: bytes at PC: ");
7624 + for (i = 0; i < 8; i++) {
7625 + unsigned int c;
7626 + if (get_user(c, (unsigned int *)pc+i))
7627 + printk(KERN_CONT "???????? ");
7628 + else
7629 + printk(KERN_CONT "%08x ", c);
7630 + }
7631 + printk("\n");
7632 +}
7633 +#endif
7634 +
7635 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7636 {
7637 struct mm_struct *mm = current->mm;
7638 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7639 if (!vma)
7640 goto bad_area;
7641
7642 +#ifdef CONFIG_PAX_PAGEEXEC
7643 + /* PaX: detect ITLB misses on non-exec pages */
7644 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7645 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7646 + {
7647 + if (address != regs->tpc)
7648 + goto good_area;
7649 +
7650 + up_read(&mm->mmap_sem);
7651 + switch (pax_handle_fetch_fault(regs)) {
7652 +
7653 +#ifdef CONFIG_PAX_EMUPLT
7654 + case 2:
7655 + case 3:
7656 + return;
7657 +#endif
7658 +
7659 + }
7660 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7661 + do_group_exit(SIGKILL);
7662 + }
7663 +#endif
7664 +
7665 /* Pure DTLB misses do not tell us whether the fault causing
7666 * load/store/atomic was a write or not, it only says that there
7667 * was no match. So in such a case we (carefully) read the
7668 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7669 index f27d103..1b06377 100644
7670 --- a/arch/sparc/mm/hugetlbpage.c
7671 +++ b/arch/sparc/mm/hugetlbpage.c
7672 @@ -69,7 +69,7 @@ full_search:
7673 }
7674 return -ENOMEM;
7675 }
7676 - if (likely(!vma || addr + len <= vma->vm_start)) {
7677 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7678 /*
7679 * Remember the place where we stopped the search:
7680 */
7681 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7682 /* make sure it can fit in the remaining address space */
7683 if (likely(addr > len)) {
7684 vma = find_vma(mm, addr-len);
7685 - if (!vma || addr <= vma->vm_start) {
7686 + if (check_heap_stack_gap(vma, addr - len, len)) {
7687 /* remember the address as a hint for next time */
7688 return (mm->free_area_cache = addr-len);
7689 }
7690 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7691 if (unlikely(mm->mmap_base < len))
7692 goto bottomup;
7693
7694 - addr = (mm->mmap_base-len) & HPAGE_MASK;
7695 + addr = mm->mmap_base - len;
7696
7697 do {
7698 + addr &= HPAGE_MASK;
7699 /*
7700 * Lookup failure means no vma is above this address,
7701 * else if new region fits below vma->vm_start,
7702 * return with success:
7703 */
7704 vma = find_vma(mm, addr);
7705 - if (likely(!vma || addr+len <= vma->vm_start)) {
7706 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7707 /* remember the address as a hint for next time */
7708 return (mm->free_area_cache = addr);
7709 }
7710 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7711 mm->cached_hole_size = vma->vm_start - addr;
7712
7713 /* try just below the current vma->vm_start */
7714 - addr = (vma->vm_start-len) & HPAGE_MASK;
7715 - } while (likely(len < vma->vm_start));
7716 + addr = skip_heap_stack_gap(vma, len);
7717 + } while (!IS_ERR_VALUE(addr));
7718
7719 bottomup:
7720 /*
7721 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7722 if (addr) {
7723 addr = ALIGN(addr, HPAGE_SIZE);
7724 vma = find_vma(mm, addr);
7725 - if (task_size - len >= addr &&
7726 - (!vma || addr + len <= vma->vm_start))
7727 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7728 return addr;
7729 }
7730 if (mm->get_unmapped_area == arch_get_unmapped_area)
7731 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7732 index dc7c3b1..34c0070 100644
7733 --- a/arch/sparc/mm/init_32.c
7734 +++ b/arch/sparc/mm/init_32.c
7735 @@ -317,6 +317,9 @@ extern void device_scan(void);
7736 pgprot_t PAGE_SHARED __read_mostly;
7737 EXPORT_SYMBOL(PAGE_SHARED);
7738
7739 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7740 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7741 +
7742 void __init paging_init(void)
7743 {
7744 switch(sparc_cpu_model) {
7745 @@ -345,17 +348,17 @@ void __init paging_init(void)
7746
7747 /* Initialize the protection map with non-constant, MMU dependent values. */
7748 protection_map[0] = PAGE_NONE;
7749 - protection_map[1] = PAGE_READONLY;
7750 - protection_map[2] = PAGE_COPY;
7751 - protection_map[3] = PAGE_COPY;
7752 + protection_map[1] = PAGE_READONLY_NOEXEC;
7753 + protection_map[2] = PAGE_COPY_NOEXEC;
7754 + protection_map[3] = PAGE_COPY_NOEXEC;
7755 protection_map[4] = PAGE_READONLY;
7756 protection_map[5] = PAGE_READONLY;
7757 protection_map[6] = PAGE_COPY;
7758 protection_map[7] = PAGE_COPY;
7759 protection_map[8] = PAGE_NONE;
7760 - protection_map[9] = PAGE_READONLY;
7761 - protection_map[10] = PAGE_SHARED;
7762 - protection_map[11] = PAGE_SHARED;
7763 + protection_map[9] = PAGE_READONLY_NOEXEC;
7764 + protection_map[10] = PAGE_SHARED_NOEXEC;
7765 + protection_map[11] = PAGE_SHARED_NOEXEC;
7766 protection_map[12] = PAGE_READONLY;
7767 protection_map[13] = PAGE_READONLY;
7768 protection_map[14] = PAGE_SHARED;
7769 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7770 index 509b1ff..bfd7118 100644
7771 --- a/arch/sparc/mm/srmmu.c
7772 +++ b/arch/sparc/mm/srmmu.c
7773 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7774 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7775 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7776 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7777 +
7778 +#ifdef CONFIG_PAX_PAGEEXEC
7779 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7780 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7781 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7782 +#endif
7783 +
7784 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7785 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7786
7787 diff --git a/arch/um/Makefile b/arch/um/Makefile
7788 index fc633db..5e1a1c2 100644
7789 --- a/arch/um/Makefile
7790 +++ b/arch/um/Makefile
7791 @@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7792 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7793 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
7794
7795 +ifdef CONSTIFY_PLUGIN
7796 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7797 +endif
7798 +
7799 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
7800
7801 #This will adjust *FLAGS accordingly to the platform.
7802 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7803 index 19e1bdd..3665b77 100644
7804 --- a/arch/um/include/asm/cache.h
7805 +++ b/arch/um/include/asm/cache.h
7806 @@ -1,6 +1,7 @@
7807 #ifndef __UM_CACHE_H
7808 #define __UM_CACHE_H
7809
7810 +#include <linux/const.h>
7811
7812 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7813 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7814 @@ -12,6 +13,6 @@
7815 # define L1_CACHE_SHIFT 5
7816 #endif
7817
7818 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7819 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7820
7821 #endif
7822 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7823 index 6c03acd..a5e0215 100644
7824 --- a/arch/um/include/asm/kmap_types.h
7825 +++ b/arch/um/include/asm/kmap_types.h
7826 @@ -23,6 +23,7 @@ enum km_type {
7827 KM_IRQ1,
7828 KM_SOFTIRQ0,
7829 KM_SOFTIRQ1,
7830 + KM_CLEARPAGE,
7831 KM_TYPE_NR
7832 };
7833
7834 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7835 index 4cc9b6c..02e5029 100644
7836 --- a/arch/um/include/asm/page.h
7837 +++ b/arch/um/include/asm/page.h
7838 @@ -14,6 +14,9 @@
7839 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7840 #define PAGE_MASK (~(PAGE_SIZE-1))
7841
7842 +#define ktla_ktva(addr) (addr)
7843 +#define ktva_ktla(addr) (addr)
7844 +
7845 #ifndef __ASSEMBLY__
7846
7847 struct page;
7848 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7849 index 4a28a15..654dc2a 100644
7850 --- a/arch/um/kernel/process.c
7851 +++ b/arch/um/kernel/process.c
7852 @@ -393,22 +393,6 @@ int singlestepping(void * t)
7853 return 2;
7854 }
7855
7856 -/*
7857 - * Only x86 and x86_64 have an arch_align_stack().
7858 - * All other arches have "#define arch_align_stack(x) (x)"
7859 - * in their asm/system.h
7860 - * As this is included in UML from asm-um/system-generic.h,
7861 - * we can use it to behave as the subarch does.
7862 - */
7863 -#ifndef arch_align_stack
7864 -unsigned long arch_align_stack(unsigned long sp)
7865 -{
7866 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7867 - sp -= get_random_int() % 8192;
7868 - return sp & ~0xf;
7869 -}
7870 -#endif
7871 -
7872 unsigned long get_wchan(struct task_struct *p)
7873 {
7874 unsigned long stack_page, sp, ip;
7875 diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
7876 index d1b93c4..ae1b7fd 100644
7877 --- a/arch/um/sys-i386/shared/sysdep/system.h
7878 +++ b/arch/um/sys-i386/shared/sysdep/system.h
7879 @@ -17,7 +17,7 @@
7880 # define AT_VECTOR_SIZE_ARCH 1
7881 #endif
7882
7883 -extern unsigned long arch_align_stack(unsigned long sp);
7884 +#define arch_align_stack(x) ((x) & ~0xfUL)
7885
7886 void default_idle(void);
7887
7888 diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
7889 index 857ca0b..9a2669d 100644
7890 --- a/arch/um/sys-i386/syscalls.c
7891 +++ b/arch/um/sys-i386/syscalls.c
7892 @@ -11,6 +11,21 @@
7893 #include "asm/uaccess.h"
7894 #include "asm/unistd.h"
7895
7896 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
7897 +{
7898 + unsigned long pax_task_size = TASK_SIZE;
7899 +
7900 +#ifdef CONFIG_PAX_SEGMEXEC
7901 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
7902 + pax_task_size = SEGMEXEC_TASK_SIZE;
7903 +#endif
7904 +
7905 + if (len > pax_task_size || addr > pax_task_size - len)
7906 + return -EINVAL;
7907 +
7908 + return 0;
7909 +}
7910 +
7911 /*
7912 * Perform the select(nd, in, out, ex, tv) and mmap() system
7913 * calls. Linux/i386 didn't use to be able to handle more than
7914 diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
7915 index d1b93c4..ae1b7fd 100644
7916 --- a/arch/um/sys-x86_64/shared/sysdep/system.h
7917 +++ b/arch/um/sys-x86_64/shared/sysdep/system.h
7918 @@ -17,7 +17,7 @@
7919 # define AT_VECTOR_SIZE_ARCH 1
7920 #endif
7921
7922 -extern unsigned long arch_align_stack(unsigned long sp);
7923 +#define arch_align_stack(x) ((x) & ~0xfUL)
7924
7925 void default_idle(void);
7926
7927 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7928 index 73ae02a..f932de5 100644
7929 --- a/arch/x86/Kconfig
7930 +++ b/arch/x86/Kconfig
7931 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
7932
7933 config X86_32_LAZY_GS
7934 def_bool y
7935 - depends on X86_32 && !CC_STACKPROTECTOR
7936 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7937
7938 config KTIME_SCALAR
7939 def_bool X86_32
7940 @@ -1008,7 +1008,7 @@ choice
7941
7942 config NOHIGHMEM
7943 bool "off"
7944 - depends on !X86_NUMAQ
7945 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7946 ---help---
7947 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7948 However, the address space of 32-bit x86 processors is only 4
7949 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
7950
7951 config HIGHMEM4G
7952 bool "4GB"
7953 - depends on !X86_NUMAQ
7954 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7955 ---help---
7956 Select this if you have a 32-bit processor and between 1 and 4
7957 gigabytes of physical RAM.
7958 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
7959 hex
7960 default 0xB0000000 if VMSPLIT_3G_OPT
7961 default 0x80000000 if VMSPLIT_2G
7962 - default 0x78000000 if VMSPLIT_2G_OPT
7963 + default 0x70000000 if VMSPLIT_2G_OPT
7964 default 0x40000000 if VMSPLIT_1G
7965 default 0xC0000000
7966 depends on X86_32
7967 @@ -1460,6 +1460,7 @@ config SECCOMP
7968
7969 config CC_STACKPROTECTOR
7970 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7971 + depends on X86_64 || !PAX_MEMORY_UDEREF
7972 ---help---
7973 This option turns on the -fstack-protector GCC feature. This
7974 feature puts, at the beginning of functions, a canary value on
7975 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
7976 config PHYSICAL_START
7977 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
7978 default "0x1000000"
7979 + range 0x400000 0x40000000
7980 ---help---
7981 This gives the physical address where the kernel is loaded.
7982
7983 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
7984 hex
7985 prompt "Alignment value to which kernel should be aligned" if X86_32
7986 default "0x1000000"
7987 + range 0x400000 0x1000000 if PAX_KERNEXEC
7988 range 0x2000 0x1000000
7989 ---help---
7990 This value puts the alignment restrictions on physical address
7991 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
7992 Say N if you want to disable CPU hotplug.
7993
7994 config COMPAT_VDSO
7995 - def_bool y
7996 + def_bool n
7997 prompt "Compat VDSO support"
7998 depends on X86_32 || IA32_EMULATION
7999 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
8000 ---help---
8001 Map the 32-bit VDSO to the predictable old-style address too.
8002 ---help---
8003 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
8004 index 0e566103..1a6b57e 100644
8005 --- a/arch/x86/Kconfig.cpu
8006 +++ b/arch/x86/Kconfig.cpu
8007 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
8008
8009 config X86_F00F_BUG
8010 def_bool y
8011 - depends on M586MMX || M586TSC || M586 || M486 || M386
8012 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
8013
8014 config X86_WP_WORKS_OK
8015 def_bool y
8016 @@ -360,7 +360,7 @@ config X86_POPAD_OK
8017
8018 config X86_ALIGNMENT_16
8019 def_bool y
8020 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8021 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8022
8023 config X86_INTEL_USERCOPY
8024 def_bool y
8025 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
8026 # generates cmov.
8027 config X86_CMOV
8028 def_bool y
8029 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
8030 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
8031
8032 config X86_MINIMUM_CPU_FAMILY
8033 int
8034 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
8035 index d105f29..c928727 100644
8036 --- a/arch/x86/Kconfig.debug
8037 +++ b/arch/x86/Kconfig.debug
8038 @@ -99,7 +99,7 @@ config X86_PTDUMP
8039 config DEBUG_RODATA
8040 bool "Write protect kernel read-only data structures"
8041 default y
8042 - depends on DEBUG_KERNEL
8043 + depends on DEBUG_KERNEL && BROKEN
8044 ---help---
8045 Mark the kernel read-only data as write-protected in the pagetables,
8046 in order to catch accidental (and incorrect) writes to such const
8047 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
8048 index d2d24c9..0f21f8d 100644
8049 --- a/arch/x86/Makefile
8050 +++ b/arch/x86/Makefile
8051 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
8052 else
8053 BITS := 64
8054 UTS_MACHINE := x86_64
8055 + biarch := $(call cc-option,-m64)
8056 CHECKFLAGS += -D__x86_64__ -m64
8057
8058 KBUILD_AFLAGS += -m64
8059 @@ -189,3 +190,12 @@ define archhelp
8060 echo ' FDARGS="..." arguments for the booted kernel'
8061 echo ' FDINITRD=file initrd for the booted kernel'
8062 endef
8063 +
8064 +define OLD_LD
8065 +
8066 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
8067 +*** Please upgrade your binutils to 2.18 or newer
8068 +endef
8069 +
8070 +archprepare:
8071 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
8072 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
8073 index ec749c2..bbb5319 100644
8074 --- a/arch/x86/boot/Makefile
8075 +++ b/arch/x86/boot/Makefile
8076 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
8077 $(call cc-option, -fno-stack-protector) \
8078 $(call cc-option, -mpreferred-stack-boundary=2)
8079 KBUILD_CFLAGS += $(call cc-option, -m32)
8080 +ifdef CONSTIFY_PLUGIN
8081 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
8082 +endif
8083 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8084 GCOV_PROFILE := n
8085
8086 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
8087 index 878e4b9..20537ab 100644
8088 --- a/arch/x86/boot/bitops.h
8089 +++ b/arch/x86/boot/bitops.h
8090 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8091 u8 v;
8092 const u32 *p = (const u32 *)addr;
8093
8094 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8095 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8096 return v;
8097 }
8098
8099 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8100
8101 static inline void set_bit(int nr, void *addr)
8102 {
8103 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8104 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8105 }
8106
8107 #endif /* BOOT_BITOPS_H */
8108 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
8109 index 98239d2..f40214c 100644
8110 --- a/arch/x86/boot/boot.h
8111 +++ b/arch/x86/boot/boot.h
8112 @@ -82,7 +82,7 @@ static inline void io_delay(void)
8113 static inline u16 ds(void)
8114 {
8115 u16 seg;
8116 - asm("movw %%ds,%0" : "=rm" (seg));
8117 + asm volatile("movw %%ds,%0" : "=rm" (seg));
8118 return seg;
8119 }
8120
8121 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
8122 static inline int memcmp(const void *s1, const void *s2, size_t len)
8123 {
8124 u8 diff;
8125 - asm("repe; cmpsb; setnz %0"
8126 + asm volatile("repe; cmpsb; setnz %0"
8127 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
8128 return diff;
8129 }
8130 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
8131 index f8ed065..5bf5ff3 100644
8132 --- a/arch/x86/boot/compressed/Makefile
8133 +++ b/arch/x86/boot/compressed/Makefile
8134 @@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
8135 KBUILD_CFLAGS += $(cflags-y)
8136 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
8137 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
8138 +ifdef CONSTIFY_PLUGIN
8139 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
8140 +endif
8141
8142 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8143 GCOV_PROFILE := n
8144 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
8145 index f543b70..b60fba8 100644
8146 --- a/arch/x86/boot/compressed/head_32.S
8147 +++ b/arch/x86/boot/compressed/head_32.S
8148 @@ -76,7 +76,7 @@ ENTRY(startup_32)
8149 notl %eax
8150 andl %eax, %ebx
8151 #else
8152 - movl $LOAD_PHYSICAL_ADDR, %ebx
8153 + movl $____LOAD_PHYSICAL_ADDR, %ebx
8154 #endif
8155
8156 /* Target address to relocate to for decompression */
8157 @@ -149,7 +149,7 @@ relocated:
8158 * and where it was actually loaded.
8159 */
8160 movl %ebp, %ebx
8161 - subl $LOAD_PHYSICAL_ADDR, %ebx
8162 + subl $____LOAD_PHYSICAL_ADDR, %ebx
8163 jz 2f /* Nothing to be done if loaded at compiled addr. */
8164 /*
8165 * Process relocations.
8166 @@ -157,8 +157,7 @@ relocated:
8167
8168 1: subl $4, %edi
8169 movl (%edi), %ecx
8170 - testl %ecx, %ecx
8171 - jz 2f
8172 + jecxz 2f
8173 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
8174 jmp 1b
8175 2:
8176 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
8177 index 077e1b6..2c6b13b 100644
8178 --- a/arch/x86/boot/compressed/head_64.S
8179 +++ b/arch/x86/boot/compressed/head_64.S
8180 @@ -91,7 +91,7 @@ ENTRY(startup_32)
8181 notl %eax
8182 andl %eax, %ebx
8183 #else
8184 - movl $LOAD_PHYSICAL_ADDR, %ebx
8185 + movl $____LOAD_PHYSICAL_ADDR, %ebx
8186 #endif
8187
8188 /* Target address to relocate to for decompression */
8189 @@ -183,7 +183,7 @@ no_longmode:
8190 hlt
8191 jmp 1b
8192
8193 -#include "../../kernel/verify_cpu_64.S"
8194 +#include "../../kernel/verify_cpu.S"
8195
8196 /*
8197 * Be careful here startup_64 needs to be at a predictable
8198 @@ -234,7 +234,7 @@ ENTRY(startup_64)
8199 notq %rax
8200 andq %rax, %rbp
8201 #else
8202 - movq $LOAD_PHYSICAL_ADDR, %rbp
8203 + movq $____LOAD_PHYSICAL_ADDR, %rbp
8204 #endif
8205
8206 /* Target address to relocate to for decompression */
8207 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
8208 index 842b2a3..f00178b 100644
8209 --- a/arch/x86/boot/compressed/misc.c
8210 +++ b/arch/x86/boot/compressed/misc.c
8211 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
8212 case PT_LOAD:
8213 #ifdef CONFIG_RELOCATABLE
8214 dest = output;
8215 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
8216 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
8217 #else
8218 dest = (void *)(phdr->p_paddr);
8219 #endif
8220 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
8221 error("Destination address too large");
8222 #endif
8223 #ifndef CONFIG_RELOCATABLE
8224 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
8225 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
8226 error("Wrong destination address");
8227 #endif
8228
8229 diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
8230 index bcbd36c..b1754af 100644
8231 --- a/arch/x86/boot/compressed/mkpiggy.c
8232 +++ b/arch/x86/boot/compressed/mkpiggy.c
8233 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
8234
8235 offs = (olen > ilen) ? olen - ilen : 0;
8236 offs += olen >> 12; /* Add 8 bytes for each 32K block */
8237 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
8238 + offs += 64*1024; /* Add 64K bytes slack */
8239 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
8240
8241 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
8242 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
8243 index bbeb0c3..f5167ab 100644
8244 --- a/arch/x86/boot/compressed/relocs.c
8245 +++ b/arch/x86/boot/compressed/relocs.c
8246 @@ -10,8 +10,11 @@
8247 #define USE_BSD
8248 #include <endian.h>
8249
8250 +#include "../../../../include/linux/autoconf.h"
8251 +
8252 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
8253 static Elf32_Ehdr ehdr;
8254 +static Elf32_Phdr *phdr;
8255 static unsigned long reloc_count, reloc_idx;
8256 static unsigned long *relocs;
8257
8258 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
8259
8260 static int is_safe_abs_reloc(const char* sym_name)
8261 {
8262 - int i;
8263 + unsigned int i;
8264
8265 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
8266 if (!strcmp(sym_name, safe_abs_relocs[i]))
8267 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
8268 }
8269 }
8270
8271 +static void read_phdrs(FILE *fp)
8272 +{
8273 + unsigned int i;
8274 +
8275 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
8276 + if (!phdr) {
8277 + die("Unable to allocate %d program headers\n",
8278 + ehdr.e_phnum);
8279 + }
8280 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
8281 + die("Seek to %d failed: %s\n",
8282 + ehdr.e_phoff, strerror(errno));
8283 + }
8284 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
8285 + die("Cannot read ELF program headers: %s\n",
8286 + strerror(errno));
8287 + }
8288 + for(i = 0; i < ehdr.e_phnum; i++) {
8289 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
8290 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
8291 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
8292 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
8293 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
8294 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
8295 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
8296 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
8297 + }
8298 +
8299 +}
8300 +
8301 static void read_shdrs(FILE *fp)
8302 {
8303 - int i;
8304 + unsigned int i;
8305 Elf32_Shdr shdr;
8306
8307 secs = calloc(ehdr.e_shnum, sizeof(struct section));
8308 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
8309
8310 static void read_strtabs(FILE *fp)
8311 {
8312 - int i;
8313 + unsigned int i;
8314 for (i = 0; i < ehdr.e_shnum; i++) {
8315 struct section *sec = &secs[i];
8316 if (sec->shdr.sh_type != SHT_STRTAB) {
8317 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
8318
8319 static void read_symtabs(FILE *fp)
8320 {
8321 - int i,j;
8322 + unsigned int i,j;
8323 for (i = 0; i < ehdr.e_shnum; i++) {
8324 struct section *sec = &secs[i];
8325 if (sec->shdr.sh_type != SHT_SYMTAB) {
8326 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
8327
8328 static void read_relocs(FILE *fp)
8329 {
8330 - int i,j;
8331 + unsigned int i,j;
8332 + uint32_t base;
8333 +
8334 for (i = 0; i < ehdr.e_shnum; i++) {
8335 struct section *sec = &secs[i];
8336 if (sec->shdr.sh_type != SHT_REL) {
8337 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
8338 die("Cannot read symbol table: %s\n",
8339 strerror(errno));
8340 }
8341 + base = 0;
8342 + for (j = 0; j < ehdr.e_phnum; j++) {
8343 + if (phdr[j].p_type != PT_LOAD )
8344 + continue;
8345 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
8346 + continue;
8347 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
8348 + break;
8349 + }
8350 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
8351 Elf32_Rel *rel = &sec->reltab[j];
8352 - rel->r_offset = elf32_to_cpu(rel->r_offset);
8353 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
8354 rel->r_info = elf32_to_cpu(rel->r_info);
8355 }
8356 }
8357 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
8358
8359 static void print_absolute_symbols(void)
8360 {
8361 - int i;
8362 + unsigned int i;
8363 printf("Absolute symbols\n");
8364 printf(" Num: Value Size Type Bind Visibility Name\n");
8365 for (i = 0; i < ehdr.e_shnum; i++) {
8366 struct section *sec = &secs[i];
8367 char *sym_strtab;
8368 Elf32_Sym *sh_symtab;
8369 - int j;
8370 + unsigned int j;
8371
8372 if (sec->shdr.sh_type != SHT_SYMTAB) {
8373 continue;
8374 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
8375
8376 static void print_absolute_relocs(void)
8377 {
8378 - int i, printed = 0;
8379 + unsigned int i, printed = 0;
8380
8381 for (i = 0; i < ehdr.e_shnum; i++) {
8382 struct section *sec = &secs[i];
8383 struct section *sec_applies, *sec_symtab;
8384 char *sym_strtab;
8385 Elf32_Sym *sh_symtab;
8386 - int j;
8387 + unsigned int j;
8388 if (sec->shdr.sh_type != SHT_REL) {
8389 continue;
8390 }
8391 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
8392
8393 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
8394 {
8395 - int i;
8396 + unsigned int i;
8397 /* Walk through the relocations */
8398 for (i = 0; i < ehdr.e_shnum; i++) {
8399 char *sym_strtab;
8400 Elf32_Sym *sh_symtab;
8401 struct section *sec_applies, *sec_symtab;
8402 - int j;
8403 + unsigned int j;
8404 struct section *sec = &secs[i];
8405
8406 if (sec->shdr.sh_type != SHT_REL) {
8407 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
8408 if (sym->st_shndx == SHN_ABS) {
8409 continue;
8410 }
8411 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
8412 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
8413 + continue;
8414 +
8415 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
8416 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
8417 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
8418 + continue;
8419 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
8420 + continue;
8421 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
8422 + continue;
8423 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
8424 + continue;
8425 +#endif
8426 if (r_type == R_386_NONE || r_type == R_386_PC32) {
8427 /*
8428 * NONE can be ignored and and PC relative
8429 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
8430
8431 static void emit_relocs(int as_text)
8432 {
8433 - int i;
8434 + unsigned int i;
8435 /* Count how many relocations I have and allocate space for them. */
8436 reloc_count = 0;
8437 walk_relocs(count_reloc);
8438 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
8439 fname, strerror(errno));
8440 }
8441 read_ehdr(fp);
8442 + read_phdrs(fp);
8443 read_shdrs(fp);
8444 read_strtabs(fp);
8445 read_symtabs(fp);
8446 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
8447 index 4d3ff03..e4972ff 100644
8448 --- a/arch/x86/boot/cpucheck.c
8449 +++ b/arch/x86/boot/cpucheck.c
8450 @@ -74,7 +74,7 @@ static int has_fpu(void)
8451 u16 fcw = -1, fsw = -1;
8452 u32 cr0;
8453
8454 - asm("movl %%cr0,%0" : "=r" (cr0));
8455 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
8456 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
8457 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
8458 asm volatile("movl %0,%%cr0" : : "r" (cr0));
8459 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
8460 {
8461 u32 f0, f1;
8462
8463 - asm("pushfl ; "
8464 + asm volatile("pushfl ; "
8465 "pushfl ; "
8466 "popl %0 ; "
8467 "movl %0,%1 ; "
8468 @@ -115,7 +115,7 @@ static void get_flags(void)
8469 set_bit(X86_FEATURE_FPU, cpu.flags);
8470
8471 if (has_eflag(X86_EFLAGS_ID)) {
8472 - asm("cpuid"
8473 + asm volatile("cpuid"
8474 : "=a" (max_intel_level),
8475 "=b" (cpu_vendor[0]),
8476 "=d" (cpu_vendor[1]),
8477 @@ -124,7 +124,7 @@ static void get_flags(void)
8478
8479 if (max_intel_level >= 0x00000001 &&
8480 max_intel_level <= 0x0000ffff) {
8481 - asm("cpuid"
8482 + asm volatile("cpuid"
8483 : "=a" (tfms),
8484 "=c" (cpu.flags[4]),
8485 "=d" (cpu.flags[0])
8486 @@ -136,7 +136,7 @@ static void get_flags(void)
8487 cpu.model += ((tfms >> 16) & 0xf) << 4;
8488 }
8489
8490 - asm("cpuid"
8491 + asm volatile("cpuid"
8492 : "=a" (max_amd_level)
8493 : "a" (0x80000000)
8494 : "ebx", "ecx", "edx");
8495 @@ -144,7 +144,7 @@ static void get_flags(void)
8496 if (max_amd_level >= 0x80000001 &&
8497 max_amd_level <= 0x8000ffff) {
8498 u32 eax = 0x80000001;
8499 - asm("cpuid"
8500 + asm volatile("cpuid"
8501 : "+a" (eax),
8502 "=c" (cpu.flags[6]),
8503 "=d" (cpu.flags[1])
8504 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8505 u32 ecx = MSR_K7_HWCR;
8506 u32 eax, edx;
8507
8508 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8509 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8510 eax &= ~(1 << 15);
8511 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8512 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8513
8514 get_flags(); /* Make sure it really did something */
8515 err = check_flags();
8516 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8517 u32 ecx = MSR_VIA_FCR;
8518 u32 eax, edx;
8519
8520 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8521 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8522 eax |= (1<<1)|(1<<7);
8523 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8524 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8525
8526 set_bit(X86_FEATURE_CX8, cpu.flags);
8527 err = check_flags();
8528 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8529 u32 eax, edx;
8530 u32 level = 1;
8531
8532 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8533 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8534 - asm("cpuid"
8535 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8536 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8537 + asm volatile("cpuid"
8538 : "+a" (level), "=d" (cpu.flags[0])
8539 : : "ecx", "ebx");
8540 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8541 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8542
8543 err = check_flags();
8544 }
8545 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8546 index b31cc54..8d69237 100644
8547 --- a/arch/x86/boot/header.S
8548 +++ b/arch/x86/boot/header.S
8549 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
8550 # single linked list of
8551 # struct setup_data
8552
8553 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8554 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8555
8556 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8557 #define VO_INIT_SIZE (VO__end - VO__text)
8558 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8559 index cae3feb..ff8ff2a 100644
8560 --- a/arch/x86/boot/memory.c
8561 +++ b/arch/x86/boot/memory.c
8562 @@ -19,7 +19,7 @@
8563
8564 static int detect_memory_e820(void)
8565 {
8566 - int count = 0;
8567 + unsigned int count = 0;
8568 struct biosregs ireg, oreg;
8569 struct e820entry *desc = boot_params.e820_map;
8570 static struct e820entry buf; /* static so it is zeroed */
8571 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8572 index 11e8c6e..fdbb1ed 100644
8573 --- a/arch/x86/boot/video-vesa.c
8574 +++ b/arch/x86/boot/video-vesa.c
8575 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8576
8577 boot_params.screen_info.vesapm_seg = oreg.es;
8578 boot_params.screen_info.vesapm_off = oreg.di;
8579 + boot_params.screen_info.vesapm_size = oreg.cx;
8580 }
8581
8582 /*
8583 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8584 index d42da38..787cdf3 100644
8585 --- a/arch/x86/boot/video.c
8586 +++ b/arch/x86/boot/video.c
8587 @@ -90,7 +90,7 @@ static void store_mode_params(void)
8588 static unsigned int get_entry(void)
8589 {
8590 char entry_buf[4];
8591 - int i, len = 0;
8592 + unsigned int i, len = 0;
8593 int key;
8594 unsigned int v;
8595
8596 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8597 index 5b577d5..3c1fed4 100644
8598 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
8599 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8600 @@ -8,6 +8,8 @@
8601 * including this sentence is retained in full.
8602 */
8603
8604 +#include <asm/alternative-asm.h>
8605 +
8606 .extern crypto_ft_tab
8607 .extern crypto_it_tab
8608 .extern crypto_fl_tab
8609 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8610 je B192; \
8611 leaq 32(r9),r9;
8612
8613 +#define ret pax_force_retaddr 0, 1; ret
8614 +
8615 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8616 movq r1,r2; \
8617 movq r3,r4; \
8618 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8619 index eb0566e..e3ebad8 100644
8620 --- a/arch/x86/crypto/aesni-intel_asm.S
8621 +++ b/arch/x86/crypto/aesni-intel_asm.S
8622 @@ -16,6 +16,7 @@
8623 */
8624
8625 #include <linux/linkage.h>
8626 +#include <asm/alternative-asm.h>
8627
8628 .text
8629
8630 @@ -52,6 +53,7 @@ _key_expansion_256a:
8631 pxor %xmm1, %xmm0
8632 movaps %xmm0, (%rcx)
8633 add $0x10, %rcx
8634 + pax_force_retaddr_bts
8635 ret
8636
8637 _key_expansion_192a:
8638 @@ -75,6 +77,7 @@ _key_expansion_192a:
8639 shufps $0b01001110, %xmm2, %xmm1
8640 movaps %xmm1, 16(%rcx)
8641 add $0x20, %rcx
8642 + pax_force_retaddr_bts
8643 ret
8644
8645 _key_expansion_192b:
8646 @@ -93,6 +96,7 @@ _key_expansion_192b:
8647
8648 movaps %xmm0, (%rcx)
8649 add $0x10, %rcx
8650 + pax_force_retaddr_bts
8651 ret
8652
8653 _key_expansion_256b:
8654 @@ -104,6 +108,7 @@ _key_expansion_256b:
8655 pxor %xmm1, %xmm2
8656 movaps %xmm2, (%rcx)
8657 add $0x10, %rcx
8658 + pax_force_retaddr_bts
8659 ret
8660
8661 /*
8662 @@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
8663 cmp %rcx, %rdi
8664 jb .Ldec_key_loop
8665 xor %rax, %rax
8666 + pax_force_retaddr 0, 1
8667 ret
8668 +ENDPROC(aesni_set_key)
8669
8670 /*
8671 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8672 @@ -249,7 +256,9 @@ ENTRY(aesni_enc)
8673 movups (INP), STATE # input
8674 call _aesni_enc1
8675 movups STATE, (OUTP) # output
8676 + pax_force_retaddr 0, 1
8677 ret
8678 +ENDPROC(aesni_enc)
8679
8680 /*
8681 * _aesni_enc1: internal ABI
8682 @@ -319,6 +328,7 @@ _aesni_enc1:
8683 movaps 0x70(TKEYP), KEY
8684 # aesenclast KEY, STATE # last round
8685 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
8686 + pax_force_retaddr_bts
8687 ret
8688
8689 /*
8690 @@ -482,6 +492,7 @@ _aesni_enc4:
8691 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
8692 # aesenclast KEY, STATE4
8693 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
8694 + pax_force_retaddr_bts
8695 ret
8696
8697 /*
8698 @@ -493,7 +504,9 @@ ENTRY(aesni_dec)
8699 movups (INP), STATE # input
8700 call _aesni_dec1
8701 movups STATE, (OUTP) #output
8702 + pax_force_retaddr 0, 1
8703 ret
8704 +ENDPROC(aesni_dec)
8705
8706 /*
8707 * _aesni_dec1: internal ABI
8708 @@ -563,6 +576,7 @@ _aesni_dec1:
8709 movaps 0x70(TKEYP), KEY
8710 # aesdeclast KEY, STATE # last round
8711 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
8712 + pax_force_retaddr_bts
8713 ret
8714
8715 /*
8716 @@ -726,6 +740,7 @@ _aesni_dec4:
8717 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
8718 # aesdeclast KEY, STATE4
8719 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
8720 + pax_force_retaddr_bts
8721 ret
8722
8723 /*
8724 @@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
8725 cmp $16, LEN
8726 jge .Lecb_enc_loop1
8727 .Lecb_enc_ret:
8728 + pax_force_retaddr 0, 1
8729 ret
8730 +ENDPROC(aesni_ecb_enc)
8731
8732 /*
8733 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8734 @@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
8735 cmp $16, LEN
8736 jge .Lecb_dec_loop1
8737 .Lecb_dec_ret:
8738 + pax_force_retaddr 0, 1
8739 ret
8740 +ENDPROC(aesni_ecb_dec)
8741
8742 /*
8743 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8744 @@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
8745 jge .Lcbc_enc_loop
8746 movups STATE, (IVP)
8747 .Lcbc_enc_ret:
8748 + pax_force_retaddr 0, 1
8749 ret
8750 +ENDPROC(aesni_cbc_enc)
8751
8752 /*
8753 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8754 @@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
8755 .Lcbc_dec_ret:
8756 movups IV, (IVP)
8757 .Lcbc_dec_just_ret:
8758 + pax_force_retaddr 0, 1
8759 ret
8760 +ENDPROC(aesni_cbc_dec)
8761 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8762 index 6214a9b..1f4fc9a 100644
8763 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8764 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8765 @@ -1,3 +1,5 @@
8766 +#include <asm/alternative-asm.h>
8767 +
8768 # enter ECRYPT_encrypt_bytes
8769 .text
8770 .p2align 5
8771 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8772 add %r11,%rsp
8773 mov %rdi,%rax
8774 mov %rsi,%rdx
8775 + pax_force_retaddr 0, 1
8776 ret
8777 # bytesatleast65:
8778 ._bytesatleast65:
8779 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
8780 add %r11,%rsp
8781 mov %rdi,%rax
8782 mov %rsi,%rdx
8783 + pax_force_retaddr
8784 ret
8785 # enter ECRYPT_ivsetup
8786 .text
8787 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8788 add %r11,%rsp
8789 mov %rdi,%rax
8790 mov %rsi,%rdx
8791 + pax_force_retaddr
8792 ret
8793 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8794 index 35974a5..5662ae2 100644
8795 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8796 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8797 @@ -21,6 +21,7 @@
8798 .text
8799
8800 #include <asm/asm-offsets.h>
8801 +#include <asm/alternative-asm.h>
8802
8803 #define a_offset 0
8804 #define b_offset 4
8805 @@ -269,6 +270,7 @@ twofish_enc_blk:
8806
8807 popq R1
8808 movq $1,%rax
8809 + pax_force_retaddr 0, 1
8810 ret
8811
8812 twofish_dec_blk:
8813 @@ -321,4 +323,5 @@ twofish_dec_blk:
8814
8815 popq R1
8816 movq $1,%rax
8817 + pax_force_retaddr 0, 1
8818 ret
8819 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8820 index 14531ab..bc68a7b 100644
8821 --- a/arch/x86/ia32/ia32_aout.c
8822 +++ b/arch/x86/ia32/ia32_aout.c
8823 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8824 unsigned long dump_start, dump_size;
8825 struct user32 dump;
8826
8827 + memset(&dump, 0, sizeof(dump));
8828 +
8829 fs = get_fs();
8830 set_fs(KERNEL_DS);
8831 has_dumped = 1;
8832 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8833 dump_size = dump.u_ssize << PAGE_SHIFT;
8834 DUMP_WRITE(dump_start, dump_size);
8835 }
8836 - /*
8837 - * Finally dump the task struct. Not be used by gdb, but
8838 - * could be useful
8839 - */
8840 - set_fs(KERNEL_DS);
8841 - DUMP_WRITE(current, sizeof(*current));
8842 end_coredump:
8843 set_fs(fs);
8844 return has_dumped;
8845 @@ -327,6 +323,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
8846 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
8847 current->mm->cached_hole_size = 0;
8848
8849 + retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8850 + if (retval < 0) {
8851 + /* Someone check-me: is this error path enough? */
8852 + send_sig(SIGKILL, current, 0);
8853 + return retval;
8854 + }
8855 +
8856 install_exec_creds(bprm);
8857 current->flags &= ~PF_FORKNOEXEC;
8858
8859 @@ -422,13 +425,6 @@ beyond_if:
8860
8861 set_brk(current->mm->start_brk, current->mm->brk);
8862
8863 - retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8864 - if (retval < 0) {
8865 - /* Someone check-me: is this error path enough? */
8866 - send_sig(SIGKILL, current, 0);
8867 - return retval;
8868 - }
8869 -
8870 current->mm->start_stack =
8871 (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
8872 /* start thread */
8873 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8874 index 588a7aa..a3468b0 100644
8875 --- a/arch/x86/ia32/ia32_signal.c
8876 +++ b/arch/x86/ia32/ia32_signal.c
8877 @@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8878 }
8879 seg = get_fs();
8880 set_fs(KERNEL_DS);
8881 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8882 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8883 set_fs(seg);
8884 if (ret >= 0 && uoss_ptr) {
8885 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8886 @@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8887 */
8888 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8889 size_t frame_size,
8890 - void **fpstate)
8891 + void __user **fpstate)
8892 {
8893 unsigned long sp;
8894
8895 @@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8896
8897 if (used_math()) {
8898 sp = sp - sig_xstate_ia32_size;
8899 - *fpstate = (struct _fpstate_ia32 *) sp;
8900 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8901 if (save_i387_xstate_ia32(*fpstate) < 0)
8902 return (void __user *) -1L;
8903 }
8904 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8905 sp -= frame_size;
8906 /* Align the stack pointer according to the i386 ABI,
8907 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8908 - sp = ((sp + 4) & -16ul) - 4;
8909 + sp = ((sp - 12) & -16ul) - 4;
8910 return (void __user *) sp;
8911 }
8912
8913 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8914 * These are actually not used anymore, but left because some
8915 * gdb versions depend on them as a marker.
8916 */
8917 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8918 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8919 } put_user_catch(err);
8920
8921 if (err)
8922 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8923 0xb8,
8924 __NR_ia32_rt_sigreturn,
8925 0x80cd,
8926 - 0,
8927 + 0
8928 };
8929
8930 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8931 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8932
8933 if (ka->sa.sa_flags & SA_RESTORER)
8934 restorer = ka->sa.sa_restorer;
8935 + else if (current->mm->context.vdso)
8936 + /* Return stub is in 32bit vsyscall page */
8937 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8938 else
8939 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8940 - rt_sigreturn);
8941 + restorer = &frame->retcode;
8942 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8943
8944 /*
8945 * Not actually used anymore, but left because some gdb
8946 * versions need it.
8947 */
8948 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8949 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8950 } put_user_catch(err);
8951
8952 if (err)
8953 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8954 index 4edd8eb..29124b4 100644
8955 --- a/arch/x86/ia32/ia32entry.S
8956 +++ b/arch/x86/ia32/ia32entry.S
8957 @@ -13,7 +13,9 @@
8958 #include <asm/thread_info.h>
8959 #include <asm/segment.h>
8960 #include <asm/irqflags.h>
8961 +#include <asm/pgtable.h>
8962 #include <linux/linkage.h>
8963 +#include <asm/alternative-asm.h>
8964
8965 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8966 #include <linux/elf-em.h>
8967 @@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
8968 ENDPROC(native_irq_enable_sysexit)
8969 #endif
8970
8971 + .macro pax_enter_kernel_user
8972 + pax_set_fptr_mask
8973 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8974 + call pax_enter_kernel_user
8975 +#endif
8976 + .endm
8977 +
8978 + .macro pax_exit_kernel_user
8979 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8980 + call pax_exit_kernel_user
8981 +#endif
8982 +#ifdef CONFIG_PAX_RANDKSTACK
8983 + pushq %rax
8984 + pushq %r11
8985 + call pax_randomize_kstack
8986 + popq %r11
8987 + popq %rax
8988 +#endif
8989 + .endm
8990 +
8991 +.macro pax_erase_kstack
8992 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8993 + call pax_erase_kstack
8994 +#endif
8995 +.endm
8996 +
8997 /*
8998 * 32bit SYSENTER instruction entry.
8999 *
9000 @@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
9001 CFI_REGISTER rsp,rbp
9002 SWAPGS_UNSAFE_STACK
9003 movq PER_CPU_VAR(kernel_stack), %rsp
9004 - addq $(KERNEL_STACK_OFFSET),%rsp
9005 - /*
9006 - * No need to follow this irqs on/off section: the syscall
9007 - * disabled irqs, here we enable it straight after entry:
9008 - */
9009 - ENABLE_INTERRUPTS(CLBR_NONE)
9010 movl %ebp,%ebp /* zero extension */
9011 pushq $__USER32_DS
9012 CFI_ADJUST_CFA_OFFSET 8
9013 @@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
9014 pushfq
9015 CFI_ADJUST_CFA_OFFSET 8
9016 /*CFI_REL_OFFSET rflags,0*/
9017 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
9018 - CFI_REGISTER rip,r10
9019 + orl $X86_EFLAGS_IF,(%rsp)
9020 + GET_THREAD_INFO(%r11)
9021 + movl TI_sysenter_return(%r11), %r11d
9022 + CFI_REGISTER rip,r11
9023 pushq $__USER32_CS
9024 CFI_ADJUST_CFA_OFFSET 8
9025 /*CFI_REL_OFFSET cs,0*/
9026 movl %eax, %eax
9027 - pushq %r10
9028 + pushq %r11
9029 CFI_ADJUST_CFA_OFFSET 8
9030 CFI_REL_OFFSET rip,0
9031 pushq %rax
9032 CFI_ADJUST_CFA_OFFSET 8
9033 cld
9034 SAVE_ARGS 0,0,1
9035 + pax_enter_kernel_user
9036 + /*
9037 + * No need to follow this irqs on/off section: the syscall
9038 + * disabled irqs, here we enable it straight after entry:
9039 + */
9040 + ENABLE_INTERRUPTS(CLBR_NONE)
9041 /* no need to do an access_ok check here because rbp has been
9042 32bit zero extended */
9043 +
9044 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9045 + mov $PAX_USER_SHADOW_BASE,%r11
9046 + add %r11,%rbp
9047 +#endif
9048 +
9049 1: movl (%rbp),%ebp
9050 .section __ex_table,"a"
9051 .quad 1b,ia32_badarg
9052 .previous
9053 - GET_THREAD_INFO(%r10)
9054 - orl $TS_COMPAT,TI_status(%r10)
9055 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
9056 + GET_THREAD_INFO(%r11)
9057 + orl $TS_COMPAT,TI_status(%r11)
9058 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9059 CFI_REMEMBER_STATE
9060 jnz sysenter_tracesys
9061 cmpq $(IA32_NR_syscalls-1),%rax
9062 @@ -166,13 +202,15 @@ sysenter_do_call:
9063 sysenter_dispatch:
9064 call *ia32_sys_call_table(,%rax,8)
9065 movq %rax,RAX-ARGOFFSET(%rsp)
9066 - GET_THREAD_INFO(%r10)
9067 + GET_THREAD_INFO(%r11)
9068 DISABLE_INTERRUPTS(CLBR_NONE)
9069 TRACE_IRQS_OFF
9070 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
9071 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
9072 jnz sysexit_audit
9073 sysexit_from_sys_call:
9074 - andl $~TS_COMPAT,TI_status(%r10)
9075 + pax_exit_kernel_user
9076 + pax_erase_kstack
9077 + andl $~TS_COMPAT,TI_status(%r11)
9078 /* clear IF, that popfq doesn't enable interrupts early */
9079 andl $~0x200,EFLAGS-R11(%rsp)
9080 movl RIP-R11(%rsp),%edx /* User %eip */
9081 @@ -200,6 +238,9 @@ sysexit_from_sys_call:
9082 movl %eax,%esi /* 2nd arg: syscall number */
9083 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
9084 call audit_syscall_entry
9085 +
9086 + pax_erase_kstack
9087 +
9088 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
9089 cmpq $(IA32_NR_syscalls-1),%rax
9090 ja ia32_badsys
9091 @@ -211,7 +252,7 @@ sysexit_from_sys_call:
9092 .endm
9093
9094 .macro auditsys_exit exit
9095 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
9096 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9097 jnz ia32_ret_from_sys_call
9098 TRACE_IRQS_ON
9099 sti
9100 @@ -221,12 +262,12 @@ sysexit_from_sys_call:
9101 movzbl %al,%edi /* zero-extend that into %edi */
9102 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
9103 call audit_syscall_exit
9104 - GET_THREAD_INFO(%r10)
9105 + GET_THREAD_INFO(%r11)
9106 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
9107 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
9108 cli
9109 TRACE_IRQS_OFF
9110 - testl %edi,TI_flags(%r10)
9111 + testl %edi,TI_flags(%r11)
9112 jz \exit
9113 CLEAR_RREGS -ARGOFFSET
9114 jmp int_with_check
9115 @@ -244,7 +285,7 @@ sysexit_audit:
9116
9117 sysenter_tracesys:
9118 #ifdef CONFIG_AUDITSYSCALL
9119 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
9120 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9121 jz sysenter_auditsys
9122 #endif
9123 SAVE_REST
9124 @@ -252,6 +293,9 @@ sysenter_tracesys:
9125 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
9126 movq %rsp,%rdi /* &pt_regs -> arg1 */
9127 call syscall_trace_enter
9128 +
9129 + pax_erase_kstack
9130 +
9131 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
9132 RESTORE_REST
9133 cmpq $(IA32_NR_syscalls-1),%rax
9134 @@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
9135 ENTRY(ia32_cstar_target)
9136 CFI_STARTPROC32 simple
9137 CFI_SIGNAL_FRAME
9138 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
9139 + CFI_DEF_CFA rsp,0
9140 CFI_REGISTER rip,rcx
9141 /*CFI_REGISTER rflags,r11*/
9142 SWAPGS_UNSAFE_STACK
9143 movl %esp,%r8d
9144 CFI_REGISTER rsp,r8
9145 movq PER_CPU_VAR(kernel_stack),%rsp
9146 + SAVE_ARGS 8*6,1,1
9147 + pax_enter_kernel_user
9148 /*
9149 * No need to follow this irqs on/off section: the syscall
9150 * disabled irqs and here we enable it straight after entry:
9151 */
9152 ENABLE_INTERRUPTS(CLBR_NONE)
9153 - SAVE_ARGS 8,1,1
9154 movl %eax,%eax /* zero extension */
9155 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
9156 movq %rcx,RIP-ARGOFFSET(%rsp)
9157 @@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
9158 /* no need to do an access_ok check here because r8 has been
9159 32bit zero extended */
9160 /* hardware stack frame is complete now */
9161 +
9162 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9163 + mov $PAX_USER_SHADOW_BASE,%r11
9164 + add %r11,%r8
9165 +#endif
9166 +
9167 1: movl (%r8),%r9d
9168 .section __ex_table,"a"
9169 .quad 1b,ia32_badarg
9170 .previous
9171 - GET_THREAD_INFO(%r10)
9172 - orl $TS_COMPAT,TI_status(%r10)
9173 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
9174 + GET_THREAD_INFO(%r11)
9175 + orl $TS_COMPAT,TI_status(%r11)
9176 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9177 CFI_REMEMBER_STATE
9178 jnz cstar_tracesys
9179 cmpq $IA32_NR_syscalls-1,%rax
9180 @@ -327,13 +378,15 @@ cstar_do_call:
9181 cstar_dispatch:
9182 call *ia32_sys_call_table(,%rax,8)
9183 movq %rax,RAX-ARGOFFSET(%rsp)
9184 - GET_THREAD_INFO(%r10)
9185 + GET_THREAD_INFO(%r11)
9186 DISABLE_INTERRUPTS(CLBR_NONE)
9187 TRACE_IRQS_OFF
9188 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
9189 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
9190 jnz sysretl_audit
9191 sysretl_from_sys_call:
9192 - andl $~TS_COMPAT,TI_status(%r10)
9193 + pax_exit_kernel_user
9194 + pax_erase_kstack
9195 + andl $~TS_COMPAT,TI_status(%r11)
9196 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
9197 movl RIP-ARGOFFSET(%rsp),%ecx
9198 CFI_REGISTER rip,rcx
9199 @@ -361,7 +414,7 @@ sysretl_audit:
9200
9201 cstar_tracesys:
9202 #ifdef CONFIG_AUDITSYSCALL
9203 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
9204 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9205 jz cstar_auditsys
9206 #endif
9207 xchgl %r9d,%ebp
9208 @@ -370,6 +423,9 @@ cstar_tracesys:
9209 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
9210 movq %rsp,%rdi /* &pt_regs -> arg1 */
9211 call syscall_trace_enter
9212 +
9213 + pax_erase_kstack
9214 +
9215 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
9216 RESTORE_REST
9217 xchgl %ebp,%r9d
9218 @@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
9219 CFI_REL_OFFSET rip,RIP-RIP
9220 PARAVIRT_ADJUST_EXCEPTION_FRAME
9221 SWAPGS
9222 - /*
9223 - * No need to follow this irqs on/off section: the syscall
9224 - * disabled irqs and here we enable it straight after entry:
9225 - */
9226 - ENABLE_INTERRUPTS(CLBR_NONE)
9227 movl %eax,%eax
9228 pushq %rax
9229 CFI_ADJUST_CFA_OFFSET 8
9230 @@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
9231 /* note the registers are not zero extended to the sf.
9232 this could be a problem. */
9233 SAVE_ARGS 0,0,1
9234 - GET_THREAD_INFO(%r10)
9235 - orl $TS_COMPAT,TI_status(%r10)
9236 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
9237 + pax_enter_kernel_user
9238 + /*
9239 + * No need to follow this irqs on/off section: the syscall
9240 + * disabled irqs and here we enable it straight after entry:
9241 + */
9242 + ENABLE_INTERRUPTS(CLBR_NONE)
9243 + GET_THREAD_INFO(%r11)
9244 + orl $TS_COMPAT,TI_status(%r11)
9245 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9246 jnz ia32_tracesys
9247 cmpq $(IA32_NR_syscalls-1),%rax
9248 ja ia32_badsys
9249 @@ -448,6 +505,9 @@ ia32_tracesys:
9250 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
9251 movq %rsp,%rdi /* &pt_regs -> arg1 */
9252 call syscall_trace_enter
9253 +
9254 + pax_erase_kstack
9255 +
9256 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
9257 RESTORE_REST
9258 cmpq $(IA32_NR_syscalls-1),%rax
9259 @@ -462,6 +522,7 @@ ia32_badsys:
9260
9261 quiet_ni_syscall:
9262 movq $-ENOSYS,%rax
9263 + pax_force_retaddr
9264 ret
9265 CFI_ENDPROC
9266
9267 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
9268 index 016218c..47ccbdd 100644
9269 --- a/arch/x86/ia32/sys_ia32.c
9270 +++ b/arch/x86/ia32/sys_ia32.c
9271 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
9272 */
9273 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
9274 {
9275 - typeof(ubuf->st_uid) uid = 0;
9276 - typeof(ubuf->st_gid) gid = 0;
9277 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
9278 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
9279 SET_UID(uid, stat->uid);
9280 SET_GID(gid, stat->gid);
9281 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
9282 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
9283 }
9284 set_fs(KERNEL_DS);
9285 ret = sys_rt_sigprocmask(how,
9286 - set ? (sigset_t __user *)&s : NULL,
9287 - oset ? (sigset_t __user *)&s : NULL,
9288 + set ? (sigset_t __force_user *)&s : NULL,
9289 + oset ? (sigset_t __force_user *)&s : NULL,
9290 sigsetsize);
9291 set_fs(old_fs);
9292 if (ret)
9293 @@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
9294 mm_segment_t old_fs = get_fs();
9295
9296 set_fs(KERNEL_DS);
9297 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
9298 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
9299 set_fs(old_fs);
9300 if (put_compat_timespec(&t, interval))
9301 return -EFAULT;
9302 @@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
9303 mm_segment_t old_fs = get_fs();
9304
9305 set_fs(KERNEL_DS);
9306 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
9307 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
9308 set_fs(old_fs);
9309 if (!ret) {
9310 switch (_NSIG_WORDS) {
9311 @@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
9312 if (copy_siginfo_from_user32(&info, uinfo))
9313 return -EFAULT;
9314 set_fs(KERNEL_DS);
9315 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
9316 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
9317 set_fs(old_fs);
9318 return ret;
9319 }
9320 @@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
9321 return -EFAULT;
9322
9323 set_fs(KERNEL_DS);
9324 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9325 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9326 count);
9327 set_fs(old_fs);
9328
9329 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
9330 index e2077d3..17d07ad 100644
9331 --- a/arch/x86/include/asm/alternative-asm.h
9332 +++ b/arch/x86/include/asm/alternative-asm.h
9333 @@ -8,10 +8,10 @@
9334
9335 #ifdef CONFIG_SMP
9336 .macro LOCK_PREFIX
9337 -1: lock
9338 +672: lock
9339 .section .smp_locks,"a"
9340 .align 4
9341 - X86_ALIGN 1b
9342 + X86_ALIGN 672b
9343 .previous
9344 .endm
9345 #else
9346 @@ -19,4 +19,43 @@
9347 .endm
9348 #endif
9349
9350 +#ifdef KERNEXEC_PLUGIN
9351 + .macro pax_force_retaddr_bts rip=0
9352 + btsq $63,\rip(%rsp)
9353 + .endm
9354 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9355 + .macro pax_force_retaddr rip=0, reload=0
9356 + btsq $63,\rip(%rsp)
9357 + .endm
9358 + .macro pax_force_fptr ptr
9359 + btsq $63,\ptr
9360 + .endm
9361 + .macro pax_set_fptr_mask
9362 + .endm
9363 +#endif
9364 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9365 + .macro pax_force_retaddr rip=0, reload=0
9366 + .if \reload
9367 + pax_set_fptr_mask
9368 + .endif
9369 + orq %r10,\rip(%rsp)
9370 + .endm
9371 + .macro pax_force_fptr ptr
9372 + orq %r10,\ptr
9373 + .endm
9374 + .macro pax_set_fptr_mask
9375 + movabs $0x8000000000000000,%r10
9376 + .endm
9377 +#endif
9378 +#else
9379 + .macro pax_force_retaddr rip=0, reload=0
9380 + .endm
9381 + .macro pax_force_fptr ptr
9382 + .endm
9383 + .macro pax_force_retaddr_bts rip=0
9384 + .endm
9385 + .macro pax_set_fptr_mask
9386 + .endm
9387 +#endif
9388 +
9389 #endif /* __ASSEMBLY__ */
9390 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9391 index c240efc..fdfadf3 100644
9392 --- a/arch/x86/include/asm/alternative.h
9393 +++ b/arch/x86/include/asm/alternative.h
9394 @@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
9395 " .byte 662b-661b\n" /* sourcelen */ \
9396 " .byte 664f-663f\n" /* replacementlen */ \
9397 ".previous\n" \
9398 - ".section .altinstr_replacement, \"ax\"\n" \
9399 + ".section .altinstr_replacement, \"a\"\n" \
9400 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9401 ".previous"
9402
9403 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9404 index 474d80d..1f97d58 100644
9405 --- a/arch/x86/include/asm/apic.h
9406 +++ b/arch/x86/include/asm/apic.h
9407 @@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
9408
9409 #ifdef CONFIG_X86_LOCAL_APIC
9410
9411 -extern unsigned int apic_verbosity;
9412 +extern int apic_verbosity;
9413 extern int local_apic_timer_c2_ok;
9414
9415 extern int disable_apic;
9416 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9417 index 20370c6..a2eb9b0 100644
9418 --- a/arch/x86/include/asm/apm.h
9419 +++ b/arch/x86/include/asm/apm.h
9420 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9421 __asm__ __volatile__(APM_DO_ZERO_SEGS
9422 "pushl %%edi\n\t"
9423 "pushl %%ebp\n\t"
9424 - "lcall *%%cs:apm_bios_entry\n\t"
9425 + "lcall *%%ss:apm_bios_entry\n\t"
9426 "setc %%al\n\t"
9427 "popl %%ebp\n\t"
9428 "popl %%edi\n\t"
9429 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9430 __asm__ __volatile__(APM_DO_ZERO_SEGS
9431 "pushl %%edi\n\t"
9432 "pushl %%ebp\n\t"
9433 - "lcall *%%cs:apm_bios_entry\n\t"
9434 + "lcall *%%ss:apm_bios_entry\n\t"
9435 "setc %%bl\n\t"
9436 "popl %%ebp\n\t"
9437 "popl %%edi\n\t"
9438 diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
9439 index dc5a667..939040c 100644
9440 --- a/arch/x86/include/asm/atomic_32.h
9441 +++ b/arch/x86/include/asm/atomic_32.h
9442 @@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
9443 }
9444
9445 /**
9446 + * atomic_read_unchecked - read atomic variable
9447 + * @v: pointer of type atomic_unchecked_t
9448 + *
9449 + * Atomically reads the value of @v.
9450 + */
9451 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9452 +{
9453 + return v->counter;
9454 +}
9455 +
9456 +/**
9457 * atomic_set - set atomic variable
9458 * @v: pointer of type atomic_t
9459 * @i: required value
9460 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
9461 }
9462
9463 /**
9464 + * atomic_set_unchecked - set atomic variable
9465 + * @v: pointer of type atomic_unchecked_t
9466 + * @i: required value
9467 + *
9468 + * Atomically sets the value of @v to @i.
9469 + */
9470 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9471 +{
9472 + v->counter = i;
9473 +}
9474 +
9475 +/**
9476 * atomic_add - add integer to atomic variable
9477 * @i: integer value to add
9478 * @v: pointer of type atomic_t
9479 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
9480 */
9481 static inline void atomic_add(int i, atomic_t *v)
9482 {
9483 - asm volatile(LOCK_PREFIX "addl %1,%0"
9484 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9485 +
9486 +#ifdef CONFIG_PAX_REFCOUNT
9487 + "jno 0f\n"
9488 + LOCK_PREFIX "subl %1,%0\n"
9489 + "int $4\n0:\n"
9490 + _ASM_EXTABLE(0b, 0b)
9491 +#endif
9492 +
9493 + : "+m" (v->counter)
9494 + : "ir" (i));
9495 +}
9496 +
9497 +/**
9498 + * atomic_add_unchecked - add integer to atomic variable
9499 + * @i: integer value to add
9500 + * @v: pointer of type atomic_unchecked_t
9501 + *
9502 + * Atomically adds @i to @v.
9503 + */
9504 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9505 +{
9506 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9507 : "+m" (v->counter)
9508 : "ir" (i));
9509 }
9510 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
9511 */
9512 static inline void atomic_sub(int i, atomic_t *v)
9513 {
9514 - asm volatile(LOCK_PREFIX "subl %1,%0"
9515 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9516 +
9517 +#ifdef CONFIG_PAX_REFCOUNT
9518 + "jno 0f\n"
9519 + LOCK_PREFIX "addl %1,%0\n"
9520 + "int $4\n0:\n"
9521 + _ASM_EXTABLE(0b, 0b)
9522 +#endif
9523 +
9524 + : "+m" (v->counter)
9525 + : "ir" (i));
9526 +}
9527 +
9528 +/**
9529 + * atomic_sub_unchecked - subtract integer from atomic variable
9530 + * @i: integer value to subtract
9531 + * @v: pointer of type atomic_unchecked_t
9532 + *
9533 + * Atomically subtracts @i from @v.
9534 + */
9535 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9536 +{
9537 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9538 : "+m" (v->counter)
9539 : "ir" (i));
9540 }
9541 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9542 {
9543 unsigned char c;
9544
9545 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9546 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
9547 +
9548 +#ifdef CONFIG_PAX_REFCOUNT
9549 + "jno 0f\n"
9550 + LOCK_PREFIX "addl %2,%0\n"
9551 + "int $4\n0:\n"
9552 + _ASM_EXTABLE(0b, 0b)
9553 +#endif
9554 +
9555 + "sete %1\n"
9556 : "+m" (v->counter), "=qm" (c)
9557 : "ir" (i) : "memory");
9558 return c;
9559 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9560 */
9561 static inline void atomic_inc(atomic_t *v)
9562 {
9563 - asm volatile(LOCK_PREFIX "incl %0"
9564 + asm volatile(LOCK_PREFIX "incl %0\n"
9565 +
9566 +#ifdef CONFIG_PAX_REFCOUNT
9567 + "jno 0f\n"
9568 + LOCK_PREFIX "decl %0\n"
9569 + "int $4\n0:\n"
9570 + _ASM_EXTABLE(0b, 0b)
9571 +#endif
9572 +
9573 + : "+m" (v->counter));
9574 +}
9575 +
9576 +/**
9577 + * atomic_inc_unchecked - increment atomic variable
9578 + * @v: pointer of type atomic_unchecked_t
9579 + *
9580 + * Atomically increments @v by 1.
9581 + */
9582 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9583 +{
9584 + asm volatile(LOCK_PREFIX "incl %0\n"
9585 : "+m" (v->counter));
9586 }
9587
9588 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
9589 */
9590 static inline void atomic_dec(atomic_t *v)
9591 {
9592 - asm volatile(LOCK_PREFIX "decl %0"
9593 + asm volatile(LOCK_PREFIX "decl %0\n"
9594 +
9595 +#ifdef CONFIG_PAX_REFCOUNT
9596 + "jno 0f\n"
9597 + LOCK_PREFIX "incl %0\n"
9598 + "int $4\n0:\n"
9599 + _ASM_EXTABLE(0b, 0b)
9600 +#endif
9601 +
9602 + : "+m" (v->counter));
9603 +}
9604 +
9605 +/**
9606 + * atomic_dec_unchecked - decrement atomic variable
9607 + * @v: pointer of type atomic_unchecked_t
9608 + *
9609 + * Atomically decrements @v by 1.
9610 + */
9611 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9612 +{
9613 + asm volatile(LOCK_PREFIX "decl %0\n"
9614 : "+m" (v->counter));
9615 }
9616
9617 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9618 {
9619 unsigned char c;
9620
9621 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9622 + asm volatile(LOCK_PREFIX "decl %0\n"
9623 +
9624 +#ifdef CONFIG_PAX_REFCOUNT
9625 + "jno 0f\n"
9626 + LOCK_PREFIX "incl %0\n"
9627 + "int $4\n0:\n"
9628 + _ASM_EXTABLE(0b, 0b)
9629 +#endif
9630 +
9631 + "sete %1\n"
9632 : "+m" (v->counter), "=qm" (c)
9633 : : "memory");
9634 return c != 0;
9635 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9636 {
9637 unsigned char c;
9638
9639 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9640 + asm volatile(LOCK_PREFIX "incl %0\n"
9641 +
9642 +#ifdef CONFIG_PAX_REFCOUNT
9643 + "jno 0f\n"
9644 + LOCK_PREFIX "decl %0\n"
9645 + "into\n0:\n"
9646 + _ASM_EXTABLE(0b, 0b)
9647 +#endif
9648 +
9649 + "sete %1\n"
9650 + : "+m" (v->counter), "=qm" (c)
9651 + : : "memory");
9652 + return c != 0;
9653 +}
9654 +
9655 +/**
9656 + * atomic_inc_and_test_unchecked - increment and test
9657 + * @v: pointer of type atomic_unchecked_t
9658 + *
9659 + * Atomically increments @v by 1
9660 + * and returns true if the result is zero, or false for all
9661 + * other cases.
9662 + */
9663 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9664 +{
9665 + unsigned char c;
9666 +
9667 + asm volatile(LOCK_PREFIX "incl %0\n"
9668 + "sete %1\n"
9669 : "+m" (v->counter), "=qm" (c)
9670 : : "memory");
9671 return c != 0;
9672 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9673 {
9674 unsigned char c;
9675
9676 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9677 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9678 +
9679 +#ifdef CONFIG_PAX_REFCOUNT
9680 + "jno 0f\n"
9681 + LOCK_PREFIX "subl %2,%0\n"
9682 + "int $4\n0:\n"
9683 + _ASM_EXTABLE(0b, 0b)
9684 +#endif
9685 +
9686 + "sets %1\n"
9687 : "+m" (v->counter), "=qm" (c)
9688 : "ir" (i) : "memory");
9689 return c;
9690 @@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
9691 #endif
9692 /* Modern 486+ processor */
9693 __i = i;
9694 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
9695 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9696 +
9697 +#ifdef CONFIG_PAX_REFCOUNT
9698 + "jno 0f\n"
9699 + "movl %0, %1\n"
9700 + "int $4\n0:\n"
9701 + _ASM_EXTABLE(0b, 0b)
9702 +#endif
9703 +
9704 : "+r" (i), "+m" (v->counter)
9705 : : "memory");
9706 return i + __i;
9707 @@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
9708 }
9709
9710 /**
9711 + * atomic_add_return_unchecked - add integer and return
9712 + * @v: pointer of type atomic_unchecked_t
9713 + * @i: integer value to add
9714 + *
9715 + * Atomically adds @i to @v and returns @i + @v
9716 + */
9717 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9718 +{
9719 + int __i;
9720 +#ifdef CONFIG_M386
9721 + unsigned long flags;
9722 + if (unlikely(boot_cpu_data.x86 <= 3))
9723 + goto no_xadd;
9724 +#endif
9725 + /* Modern 486+ processor */
9726 + __i = i;
9727 + asm volatile(LOCK_PREFIX "xaddl %0, %1"
9728 + : "+r" (i), "+m" (v->counter)
9729 + : : "memory");
9730 + return i + __i;
9731 +
9732 +#ifdef CONFIG_M386
9733 +no_xadd: /* Legacy 386 processor */
9734 + local_irq_save(flags);
9735 + __i = atomic_read_unchecked(v);
9736 + atomic_set_unchecked(v, i + __i);
9737 + local_irq_restore(flags);
9738 + return i + __i;
9739 +#endif
9740 +}
9741 +
9742 +/**
9743 * atomic_sub_return - subtract integer and return
9744 * @v: pointer of type atomic_t
9745 * @i: integer value to subtract
9746 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9747 return cmpxchg(&v->counter, old, new);
9748 }
9749
9750 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9751 +{
9752 + return cmpxchg(&v->counter, old, new);
9753 +}
9754 +
9755 static inline int atomic_xchg(atomic_t *v, int new)
9756 {
9757 return xchg(&v->counter, new);
9758 }
9759
9760 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9761 +{
9762 + return xchg(&v->counter, new);
9763 +}
9764 +
9765 /**
9766 * atomic_add_unless - add unless the number is already a given value
9767 * @v: pointer of type atomic_t
9768 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
9769 */
9770 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9771 {
9772 - int c, old;
9773 + int c, old, new;
9774 c = atomic_read(v);
9775 for (;;) {
9776 - if (unlikely(c == (u)))
9777 + if (unlikely(c == u))
9778 break;
9779 - old = atomic_cmpxchg((v), c, c + (a));
9780 +
9781 + asm volatile("addl %2,%0\n"
9782 +
9783 +#ifdef CONFIG_PAX_REFCOUNT
9784 + "jno 0f\n"
9785 + "subl %2,%0\n"
9786 + "int $4\n0:\n"
9787 + _ASM_EXTABLE(0b, 0b)
9788 +#endif
9789 +
9790 + : "=r" (new)
9791 + : "0" (c), "ir" (a));
9792 +
9793 + old = atomic_cmpxchg(v, c, new);
9794 if (likely(old == c))
9795 break;
9796 c = old;
9797 }
9798 - return c != (u);
9799 + return c != u;
9800 }
9801
9802 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9803
9804 #define atomic_inc_return(v) (atomic_add_return(1, v))
9805 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9806 +{
9807 + return atomic_add_return_unchecked(1, v);
9808 +}
9809 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9810
9811 /* These are x86-specific, used by some header files */
9812 @@ -266,9 +495,18 @@ typedef struct {
9813 u64 __aligned(8) counter;
9814 } atomic64_t;
9815
9816 +#ifdef CONFIG_PAX_REFCOUNT
9817 +typedef struct {
9818 + u64 __aligned(8) counter;
9819 +} atomic64_unchecked_t;
9820 +#else
9821 +typedef atomic64_t atomic64_unchecked_t;
9822 +#endif
9823 +
9824 #define ATOMIC64_INIT(val) { (val) }
9825
9826 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
9827 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
9828
9829 /**
9830 * atomic64_xchg - xchg atomic64 variable
9831 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
9832 * the old value.
9833 */
9834 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
9835 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
9836
9837 /**
9838 * atomic64_set - set atomic64 variable
9839 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
9840 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
9841
9842 /**
9843 + * atomic64_unchecked_set - set atomic64 variable
9844 + * @ptr: pointer to type atomic64_unchecked_t
9845 + * @new_val: value to assign
9846 + *
9847 + * Atomically sets the value of @ptr to @new_val.
9848 + */
9849 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
9850 +
9851 +/**
9852 * atomic64_read - read atomic64 variable
9853 * @ptr: pointer to type atomic64_t
9854 *
9855 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
9856 return res;
9857 }
9858
9859 -extern u64 atomic64_read(atomic64_t *ptr);
9860 +/**
9861 + * atomic64_read_unchecked - read atomic64 variable
9862 + * @ptr: pointer to type atomic64_unchecked_t
9863 + *
9864 + * Atomically reads the value of @ptr and returns it.
9865 + */
9866 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
9867 +{
9868 + u64 res;
9869 +
9870 + /*
9871 + * Note, we inline this atomic64_unchecked_t primitive because
9872 + * it only clobbers EAX/EDX and leaves the others
9873 + * untouched. We also (somewhat subtly) rely on the
9874 + * fact that cmpxchg8b returns the current 64-bit value
9875 + * of the memory location we are touching:
9876 + */
9877 + asm volatile(
9878 + "mov %%ebx, %%eax\n\t"
9879 + "mov %%ecx, %%edx\n\t"
9880 + LOCK_PREFIX "cmpxchg8b %1\n"
9881 + : "=&A" (res)
9882 + : "m" (*ptr)
9883 + );
9884 +
9885 + return res;
9886 +}
9887
9888 /**
9889 * atomic64_add_return - add and return
9890 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
9891 * Other variants with different arithmetic operators:
9892 */
9893 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
9894 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9895 extern u64 atomic64_inc_return(atomic64_t *ptr);
9896 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
9897 extern u64 atomic64_dec_return(atomic64_t *ptr);
9898 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
9899
9900 /**
9901 * atomic64_add - add integer to atomic64 variable
9902 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
9903 extern void atomic64_add(u64 delta, atomic64_t *ptr);
9904
9905 /**
9906 + * atomic64_add_unchecked - add integer to atomic64 variable
9907 + * @delta: integer value to add
9908 + * @ptr: pointer to type atomic64_unchecked_t
9909 + *
9910 + * Atomically adds @delta to @ptr.
9911 + */
9912 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9913 +
9914 +/**
9915 * atomic64_sub - subtract the atomic64 variable
9916 * @delta: integer value to subtract
9917 * @ptr: pointer to type atomic64_t
9918 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
9919 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
9920
9921 /**
9922 + * atomic64_sub_unchecked - subtract the atomic64 variable
9923 + * @delta: integer value to subtract
9924 + * @ptr: pointer to type atomic64_unchecked_t
9925 + *
9926 + * Atomically subtracts @delta from @ptr.
9927 + */
9928 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9929 +
9930 +/**
9931 * atomic64_sub_and_test - subtract value from variable and test result
9932 * @delta: integer value to subtract
9933 * @ptr: pointer to type atomic64_t
9934 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
9935 extern void atomic64_inc(atomic64_t *ptr);
9936
9937 /**
9938 + * atomic64_inc_unchecked - increment atomic64 variable
9939 + * @ptr: pointer to type atomic64_unchecked_t
9940 + *
9941 + * Atomically increments @ptr by 1.
9942 + */
9943 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
9944 +
9945 +/**
9946 * atomic64_dec - decrement atomic64 variable
9947 * @ptr: pointer to type atomic64_t
9948 *
9949 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
9950 extern void atomic64_dec(atomic64_t *ptr);
9951
9952 /**
9953 + * atomic64_dec_unchecked - decrement atomic64 variable
9954 + * @ptr: pointer to type atomic64_unchecked_t
9955 + *
9956 + * Atomically decrements @ptr by 1.
9957 + */
9958 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
9959 +
9960 +/**
9961 * atomic64_dec_and_test - decrement and test
9962 * @ptr: pointer to type atomic64_t
9963 *
9964 diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
9965 index d605dc2..fafd7bd 100644
9966 --- a/arch/x86/include/asm/atomic_64.h
9967 +++ b/arch/x86/include/asm/atomic_64.h
9968 @@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
9969 }
9970
9971 /**
9972 + * atomic_read_unchecked - read atomic variable
9973 + * @v: pointer of type atomic_unchecked_t
9974 + *
9975 + * Atomically reads the value of @v.
9976 + */
9977 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9978 +{
9979 + return v->counter;
9980 +}
9981 +
9982 +/**
9983 * atomic_set - set atomic variable
9984 * @v: pointer of type atomic_t
9985 * @i: required value
9986 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
9987 }
9988
9989 /**
9990 + * atomic_set_unchecked - set atomic variable
9991 + * @v: pointer of type atomic_unchecked_t
9992 + * @i: required value
9993 + *
9994 + * Atomically sets the value of @v to @i.
9995 + */
9996 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9997 +{
9998 + v->counter = i;
9999 +}
10000 +
10001 +/**
10002 * atomic_add - add integer to atomic variable
10003 * @i: integer value to add
10004 * @v: pointer of type atomic_t
10005 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
10006 */
10007 static inline void atomic_add(int i, atomic_t *v)
10008 {
10009 - asm volatile(LOCK_PREFIX "addl %1,%0"
10010 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
10011 +
10012 +#ifdef CONFIG_PAX_REFCOUNT
10013 + "jno 0f\n"
10014 + LOCK_PREFIX "subl %1,%0\n"
10015 + "int $4\n0:\n"
10016 + _ASM_EXTABLE(0b, 0b)
10017 +#endif
10018 +
10019 + : "=m" (v->counter)
10020 + : "ir" (i), "m" (v->counter));
10021 +}
10022 +
10023 +/**
10024 + * atomic_add_unchecked - add integer to atomic variable
10025 + * @i: integer value to add
10026 + * @v: pointer of type atomic_unchecked_t
10027 + *
10028 + * Atomically adds @i to @v.
10029 + */
10030 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
10031 +{
10032 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
10033 : "=m" (v->counter)
10034 : "ir" (i), "m" (v->counter));
10035 }
10036 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
10037 */
10038 static inline void atomic_sub(int i, atomic_t *v)
10039 {
10040 - asm volatile(LOCK_PREFIX "subl %1,%0"
10041 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
10042 +
10043 +#ifdef CONFIG_PAX_REFCOUNT
10044 + "jno 0f\n"
10045 + LOCK_PREFIX "addl %1,%0\n"
10046 + "int $4\n0:\n"
10047 + _ASM_EXTABLE(0b, 0b)
10048 +#endif
10049 +
10050 + : "=m" (v->counter)
10051 + : "ir" (i), "m" (v->counter));
10052 +}
10053 +
10054 +/**
10055 + * atomic_sub_unchecked - subtract the atomic variable
10056 + * @i: integer value to subtract
10057 + * @v: pointer of type atomic_unchecked_t
10058 + *
10059 + * Atomically subtracts @i from @v.
10060 + */
10061 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
10062 +{
10063 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
10064 : "=m" (v->counter)
10065 : "ir" (i), "m" (v->counter));
10066 }
10067 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10068 {
10069 unsigned char c;
10070
10071 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
10072 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
10073 +
10074 +#ifdef CONFIG_PAX_REFCOUNT
10075 + "jno 0f\n"
10076 + LOCK_PREFIX "addl %2,%0\n"
10077 + "int $4\n0:\n"
10078 + _ASM_EXTABLE(0b, 0b)
10079 +#endif
10080 +
10081 + "sete %1\n"
10082 : "=m" (v->counter), "=qm" (c)
10083 : "ir" (i), "m" (v->counter) : "memory");
10084 return c;
10085 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10086 */
10087 static inline void atomic_inc(atomic_t *v)
10088 {
10089 - asm volatile(LOCK_PREFIX "incl %0"
10090 + asm volatile(LOCK_PREFIX "incl %0\n"
10091 +
10092 +#ifdef CONFIG_PAX_REFCOUNT
10093 + "jno 0f\n"
10094 + LOCK_PREFIX "decl %0\n"
10095 + "int $4\n0:\n"
10096 + _ASM_EXTABLE(0b, 0b)
10097 +#endif
10098 +
10099 + : "=m" (v->counter)
10100 + : "m" (v->counter));
10101 +}
10102 +
10103 +/**
10104 + * atomic_inc_unchecked - increment atomic variable
10105 + * @v: pointer of type atomic_unchecked_t
10106 + *
10107 + * Atomically increments @v by 1.
10108 + */
10109 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10110 +{
10111 + asm volatile(LOCK_PREFIX "incl %0\n"
10112 : "=m" (v->counter)
10113 : "m" (v->counter));
10114 }
10115 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
10116 */
10117 static inline void atomic_dec(atomic_t *v)
10118 {
10119 - asm volatile(LOCK_PREFIX "decl %0"
10120 + asm volatile(LOCK_PREFIX "decl %0\n"
10121 +
10122 +#ifdef CONFIG_PAX_REFCOUNT
10123 + "jno 0f\n"
10124 + LOCK_PREFIX "incl %0\n"
10125 + "int $4\n0:\n"
10126 + _ASM_EXTABLE(0b, 0b)
10127 +#endif
10128 +
10129 + : "=m" (v->counter)
10130 + : "m" (v->counter));
10131 +}
10132 +
10133 +/**
10134 + * atomic_dec_unchecked - decrement atomic variable
10135 + * @v: pointer of type atomic_unchecked_t
10136 + *
10137 + * Atomically decrements @v by 1.
10138 + */
10139 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10140 +{
10141 + asm volatile(LOCK_PREFIX "decl %0\n"
10142 : "=m" (v->counter)
10143 : "m" (v->counter));
10144 }
10145 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
10146 {
10147 unsigned char c;
10148
10149 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
10150 + asm volatile(LOCK_PREFIX "decl %0\n"
10151 +
10152 +#ifdef CONFIG_PAX_REFCOUNT
10153 + "jno 0f\n"
10154 + LOCK_PREFIX "incl %0\n"
10155 + "int $4\n0:\n"
10156 + _ASM_EXTABLE(0b, 0b)
10157 +#endif
10158 +
10159 + "sete %1\n"
10160 : "=m" (v->counter), "=qm" (c)
10161 : "m" (v->counter) : "memory");
10162 return c != 0;
10163 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
10164 {
10165 unsigned char c;
10166
10167 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
10168 + asm volatile(LOCK_PREFIX "incl %0\n"
10169 +
10170 +#ifdef CONFIG_PAX_REFCOUNT
10171 + "jno 0f\n"
10172 + LOCK_PREFIX "decl %0\n"
10173 + "int $4\n0:\n"
10174 + _ASM_EXTABLE(0b, 0b)
10175 +#endif
10176 +
10177 + "sete %1\n"
10178 + : "=m" (v->counter), "=qm" (c)
10179 + : "m" (v->counter) : "memory");
10180 + return c != 0;
10181 +}
10182 +
10183 +/**
10184 + * atomic_inc_and_test_unchecked - increment and test
10185 + * @v: pointer of type atomic_unchecked_t
10186 + *
10187 + * Atomically increments @v by 1
10188 + * and returns true if the result is zero, or false for all
10189 + * other cases.
10190 + */
10191 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10192 +{
10193 + unsigned char c;
10194 +
10195 + asm volatile(LOCK_PREFIX "incl %0\n"
10196 + "sete %1\n"
10197 : "=m" (v->counter), "=qm" (c)
10198 : "m" (v->counter) : "memory");
10199 return c != 0;
10200 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
10201 {
10202 unsigned char c;
10203
10204 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
10205 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
10206 +
10207 +#ifdef CONFIG_PAX_REFCOUNT
10208 + "jno 0f\n"
10209 + LOCK_PREFIX "subl %2,%0\n"
10210 + "int $4\n0:\n"
10211 + _ASM_EXTABLE(0b, 0b)
10212 +#endif
10213 +
10214 + "sets %1\n"
10215 : "=m" (v->counter), "=qm" (c)
10216 : "ir" (i), "m" (v->counter) : "memory");
10217 return c;
10218 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
10219 static inline int atomic_add_return(int i, atomic_t *v)
10220 {
10221 int __i = i;
10222 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
10223 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
10224 +
10225 +#ifdef CONFIG_PAX_REFCOUNT
10226 + "jno 0f\n"
10227 + "movl %0, %1\n"
10228 + "int $4\n0:\n"
10229 + _ASM_EXTABLE(0b, 0b)
10230 +#endif
10231 +
10232 + : "+r" (i), "+m" (v->counter)
10233 + : : "memory");
10234 + return i + __i;
10235 +}
10236 +
10237 +/**
10238 + * atomic_add_return_unchecked - add and return
10239 + * @i: integer value to add
10240 + * @v: pointer of type atomic_unchecked_t
10241 + *
10242 + * Atomically adds @i to @v and returns @i + @v
10243 + */
10244 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10245 +{
10246 + int __i = i;
10247 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
10248 : "+r" (i), "+m" (v->counter)
10249 : : "memory");
10250 return i + __i;
10251 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
10252 }
10253
10254 #define atomic_inc_return(v) (atomic_add_return(1, v))
10255 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10256 +{
10257 + return atomic_add_return_unchecked(1, v);
10258 +}
10259 #define atomic_dec_return(v) (atomic_sub_return(1, v))
10260
10261 /* The 64-bit atomic type */
10262 @@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
10263 }
10264
10265 /**
10266 + * atomic64_read_unchecked - read atomic64 variable
10267 + * @v: pointer of type atomic64_unchecked_t
10268 + *
10269 + * Atomically reads the value of @v.
10270 + * Doesn't imply a read memory barrier.
10271 + */
10272 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
10273 +{
10274 + return v->counter;
10275 +}
10276 +
10277 +/**
10278 * atomic64_set - set atomic64 variable
10279 * @v: pointer to type atomic64_t
10280 * @i: required value
10281 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
10282 }
10283
10284 /**
10285 + * atomic64_set_unchecked - set atomic64 variable
10286 + * @v: pointer to type atomic64_unchecked_t
10287 + * @i: required value
10288 + *
10289 + * Atomically sets the value of @v to @i.
10290 + */
10291 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
10292 +{
10293 + v->counter = i;
10294 +}
10295 +
10296 +/**
10297 * atomic64_add - add integer to atomic64 variable
10298 * @i: integer value to add
10299 * @v: pointer to type atomic64_t
10300 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
10301 */
10302 static inline void atomic64_add(long i, atomic64_t *v)
10303 {
10304 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
10305 +
10306 +#ifdef CONFIG_PAX_REFCOUNT
10307 + "jno 0f\n"
10308 + LOCK_PREFIX "subq %1,%0\n"
10309 + "int $4\n0:\n"
10310 + _ASM_EXTABLE(0b, 0b)
10311 +#endif
10312 +
10313 + : "=m" (v->counter)
10314 + : "er" (i), "m" (v->counter));
10315 +}
10316 +
10317 +/**
10318 + * atomic64_add_unchecked - add integer to atomic64 variable
10319 + * @i: integer value to add
10320 + * @v: pointer to type atomic64_unchecked_t
10321 + *
10322 + * Atomically adds @i to @v.
10323 + */
10324 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
10325 +{
10326 asm volatile(LOCK_PREFIX "addq %1,%0"
10327 : "=m" (v->counter)
10328 : "er" (i), "m" (v->counter));
10329 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
10330 */
10331 static inline void atomic64_sub(long i, atomic64_t *v)
10332 {
10333 - asm volatile(LOCK_PREFIX "subq %1,%0"
10334 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
10335 +
10336 +#ifdef CONFIG_PAX_REFCOUNT
10337 + "jno 0f\n"
10338 + LOCK_PREFIX "addq %1,%0\n"
10339 + "int $4\n0:\n"
10340 + _ASM_EXTABLE(0b, 0b)
10341 +#endif
10342 +
10343 : "=m" (v->counter)
10344 : "er" (i), "m" (v->counter));
10345 }
10346 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
10347 {
10348 unsigned char c;
10349
10350 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
10351 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
10352 +
10353 +#ifdef CONFIG_PAX_REFCOUNT
10354 + "jno 0f\n"
10355 + LOCK_PREFIX "addq %2,%0\n"
10356 + "int $4\n0:\n"
10357 + _ASM_EXTABLE(0b, 0b)
10358 +#endif
10359 +
10360 + "sete %1\n"
10361 : "=m" (v->counter), "=qm" (c)
10362 : "er" (i), "m" (v->counter) : "memory");
10363 return c;
10364 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
10365 */
10366 static inline void atomic64_inc(atomic64_t *v)
10367 {
10368 + asm volatile(LOCK_PREFIX "incq %0\n"
10369 +
10370 +#ifdef CONFIG_PAX_REFCOUNT
10371 + "jno 0f\n"
10372 + LOCK_PREFIX "decq %0\n"
10373 + "int $4\n0:\n"
10374 + _ASM_EXTABLE(0b, 0b)
10375 +#endif
10376 +
10377 + : "=m" (v->counter)
10378 + : "m" (v->counter));
10379 +}
10380 +
10381 +/**
10382 + * atomic64_inc_unchecked - increment atomic64 variable
10383 + * @v: pointer to type atomic64_unchecked_t
10384 + *
10385 + * Atomically increments @v by 1.
10386 + */
10387 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10388 +{
10389 asm volatile(LOCK_PREFIX "incq %0"
10390 : "=m" (v->counter)
10391 : "m" (v->counter));
10392 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
10393 */
10394 static inline void atomic64_dec(atomic64_t *v)
10395 {
10396 - asm volatile(LOCK_PREFIX "decq %0"
10397 + asm volatile(LOCK_PREFIX "decq %0\n"
10398 +
10399 +#ifdef CONFIG_PAX_REFCOUNT
10400 + "jno 0f\n"
10401 + LOCK_PREFIX "incq %0\n"
10402 + "int $4\n0:\n"
10403 + _ASM_EXTABLE(0b, 0b)
10404 +#endif
10405 +
10406 + : "=m" (v->counter)
10407 + : "m" (v->counter));
10408 +}
10409 +
10410 +/**
10411 + * atomic64_dec_unchecked - decrement atomic64 variable
10412 + * @v: pointer to type atomic64_t
10413 + *
10414 + * Atomically decrements @v by 1.
10415 + */
10416 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10417 +{
10418 + asm volatile(LOCK_PREFIX "decq %0\n"
10419 : "=m" (v->counter)
10420 : "m" (v->counter));
10421 }
10422 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
10423 {
10424 unsigned char c;
10425
10426 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
10427 + asm volatile(LOCK_PREFIX "decq %0\n"
10428 +
10429 +#ifdef CONFIG_PAX_REFCOUNT
10430 + "jno 0f\n"
10431 + LOCK_PREFIX "incq %0\n"
10432 + "int $4\n0:\n"
10433 + _ASM_EXTABLE(0b, 0b)
10434 +#endif
10435 +
10436 + "sete %1\n"
10437 : "=m" (v->counter), "=qm" (c)
10438 : "m" (v->counter) : "memory");
10439 return c != 0;
10440 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
10441 {
10442 unsigned char c;
10443
10444 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
10445 + asm volatile(LOCK_PREFIX "incq %0\n"
10446 +
10447 +#ifdef CONFIG_PAX_REFCOUNT
10448 + "jno 0f\n"
10449 + LOCK_PREFIX "decq %0\n"
10450 + "int $4\n0:\n"
10451 + _ASM_EXTABLE(0b, 0b)
10452 +#endif
10453 +
10454 + "sete %1\n"
10455 : "=m" (v->counter), "=qm" (c)
10456 : "m" (v->counter) : "memory");
10457 return c != 0;
10458 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10459 {
10460 unsigned char c;
10461
10462 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
10463 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
10464 +
10465 +#ifdef CONFIG_PAX_REFCOUNT
10466 + "jno 0f\n"
10467 + LOCK_PREFIX "subq %2,%0\n"
10468 + "int $4\n0:\n"
10469 + _ASM_EXTABLE(0b, 0b)
10470 +#endif
10471 +
10472 + "sets %1\n"
10473 : "=m" (v->counter), "=qm" (c)
10474 : "er" (i), "m" (v->counter) : "memory");
10475 return c;
10476 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10477 static inline long atomic64_add_return(long i, atomic64_t *v)
10478 {
10479 long __i = i;
10480 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
10481 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
10482 +
10483 +#ifdef CONFIG_PAX_REFCOUNT
10484 + "jno 0f\n"
10485 + "movq %0, %1\n"
10486 + "int $4\n0:\n"
10487 + _ASM_EXTABLE(0b, 0b)
10488 +#endif
10489 +
10490 + : "+r" (i), "+m" (v->counter)
10491 + : : "memory");
10492 + return i + __i;
10493 +}
10494 +
10495 +/**
10496 + * atomic64_add_return_unchecked - add and return
10497 + * @i: integer value to add
10498 + * @v: pointer to type atomic64_unchecked_t
10499 + *
10500 + * Atomically adds @i to @v and returns @i + @v
10501 + */
10502 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10503 +{
10504 + long __i = i;
10505 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
10506 : "+r" (i), "+m" (v->counter)
10507 : : "memory");
10508 return i + __i;
10509 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
10510 }
10511
10512 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
10513 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10514 +{
10515 + return atomic64_add_return_unchecked(1, v);
10516 +}
10517 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
10518
10519 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10520 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10521 return cmpxchg(&v->counter, old, new);
10522 }
10523
10524 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
10525 +{
10526 + return cmpxchg(&v->counter, old, new);
10527 +}
10528 +
10529 static inline long atomic64_xchg(atomic64_t *v, long new)
10530 {
10531 return xchg(&v->counter, new);
10532 }
10533
10534 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10535 +{
10536 + return xchg(&v->counter, new);
10537 +}
10538 +
10539 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
10540 {
10541 return cmpxchg(&v->counter, old, new);
10542 }
10543
10544 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10545 +{
10546 + return cmpxchg(&v->counter, old, new);
10547 +}
10548 +
10549 static inline long atomic_xchg(atomic_t *v, int new)
10550 {
10551 return xchg(&v->counter, new);
10552 }
10553
10554 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10555 +{
10556 + return xchg(&v->counter, new);
10557 +}
10558 +
10559 /**
10560 * atomic_add_unless - add unless the number is a given value
10561 * @v: pointer of type atomic_t
10562 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
10563 */
10564 static inline int atomic_add_unless(atomic_t *v, int a, int u)
10565 {
10566 - int c, old;
10567 + int c, old, new;
10568 c = atomic_read(v);
10569 for (;;) {
10570 - if (unlikely(c == (u)))
10571 + if (unlikely(c == u))
10572 break;
10573 - old = atomic_cmpxchg((v), c, c + (a));
10574 +
10575 + asm volatile("addl %2,%0\n"
10576 +
10577 +#ifdef CONFIG_PAX_REFCOUNT
10578 + "jno 0f\n"
10579 + "subl %2,%0\n"
10580 + "int $4\n0:\n"
10581 + _ASM_EXTABLE(0b, 0b)
10582 +#endif
10583 +
10584 + : "=r" (new)
10585 + : "0" (c), "ir" (a));
10586 +
10587 + old = atomic_cmpxchg(v, c, new);
10588 if (likely(old == c))
10589 break;
10590 c = old;
10591 }
10592 - return c != (u);
10593 + return c != u;
10594 }
10595
10596 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
10597 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
10598 */
10599 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
10600 {
10601 - long c, old;
10602 + long c, old, new;
10603 c = atomic64_read(v);
10604 for (;;) {
10605 - if (unlikely(c == (u)))
10606 + if (unlikely(c == u))
10607 break;
10608 - old = atomic64_cmpxchg((v), c, c + (a));
10609 +
10610 + asm volatile("addq %2,%0\n"
10611 +
10612 +#ifdef CONFIG_PAX_REFCOUNT
10613 + "jno 0f\n"
10614 + "subq %2,%0\n"
10615 + "int $4\n0:\n"
10616 + _ASM_EXTABLE(0b, 0b)
10617 +#endif
10618 +
10619 + : "=r" (new)
10620 + : "0" (c), "er" (a));
10621 +
10622 + old = atomic64_cmpxchg(v, c, new);
10623 if (likely(old == c))
10624 break;
10625 c = old;
10626 }
10627 - return c != (u);
10628 + return c != u;
10629 }
10630
10631 /**
10632 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
10633 index 02b47a6..d5c4b15 100644
10634 --- a/arch/x86/include/asm/bitops.h
10635 +++ b/arch/x86/include/asm/bitops.h
10636 @@ -38,7 +38,7 @@
10637 * a mask operation on a byte.
10638 */
10639 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10640 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10641 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10642 #define CONST_MASK(nr) (1 << ((nr) & 7))
10643
10644 /**
10645 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10646 index 7a10659..8bbf355 100644
10647 --- a/arch/x86/include/asm/boot.h
10648 +++ b/arch/x86/include/asm/boot.h
10649 @@ -11,10 +11,15 @@
10650 #include <asm/pgtable_types.h>
10651
10652 /* Physical address where kernel should be loaded. */
10653 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10654 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10655 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10656 & ~(CONFIG_PHYSICAL_ALIGN - 1))
10657
10658 +#ifndef __ASSEMBLY__
10659 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
10660 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
10661 +#endif
10662 +
10663 /* Minimum kernel alignment, as a power of two */
10664 #ifdef CONFIG_X86_64
10665 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10666 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10667 index 549860d..7d45f68 100644
10668 --- a/arch/x86/include/asm/cache.h
10669 +++ b/arch/x86/include/asm/cache.h
10670 @@ -5,9 +5,10 @@
10671
10672 /* L1 cache line size */
10673 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10674 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10675 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10676
10677 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
10678 +#define __read_only __attribute__((__section__(".data.read_only")))
10679
10680 #ifdef CONFIG_X86_VSMP
10681 /* vSMP Internode cacheline shift */
10682 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10683 index b54f6af..5b376a6 100644
10684 --- a/arch/x86/include/asm/cacheflush.h
10685 +++ b/arch/x86/include/asm/cacheflush.h
10686 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
10687 static inline unsigned long get_page_memtype(struct page *pg)
10688 {
10689 if (!PageUncached(pg) && !PageWC(pg))
10690 - return -1;
10691 + return ~0UL;
10692 else if (!PageUncached(pg) && PageWC(pg))
10693 return _PAGE_CACHE_WC;
10694 else if (PageUncached(pg) && !PageWC(pg))
10695 @@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
10696 SetPageWC(pg);
10697 break;
10698 default:
10699 - case -1:
10700 + case ~0UL:
10701 ClearPageUncached(pg);
10702 ClearPageWC(pg);
10703 break;
10704 diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
10705 index 0e63c9a..ab8d972 100644
10706 --- a/arch/x86/include/asm/calling.h
10707 +++ b/arch/x86/include/asm/calling.h
10708 @@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
10709 * for assembly code:
10710 */
10711
10712 -#define R15 0
10713 -#define R14 8
10714 -#define R13 16
10715 -#define R12 24
10716 -#define RBP 32
10717 -#define RBX 40
10718 +#define R15 (0)
10719 +#define R14 (8)
10720 +#define R13 (16)
10721 +#define R12 (24)
10722 +#define RBP (32)
10723 +#define RBX (40)
10724
10725 /* arguments: interrupts/non tracing syscalls only save up to here: */
10726 -#define R11 48
10727 -#define R10 56
10728 -#define R9 64
10729 -#define R8 72
10730 -#define RAX 80
10731 -#define RCX 88
10732 -#define RDX 96
10733 -#define RSI 104
10734 -#define RDI 112
10735 -#define ORIG_RAX 120 /* + error_code */
10736 +#define R11 (48)
10737 +#define R10 (56)
10738 +#define R9 (64)
10739 +#define R8 (72)
10740 +#define RAX (80)
10741 +#define RCX (88)
10742 +#define RDX (96)
10743 +#define RSI (104)
10744 +#define RDI (112)
10745 +#define ORIG_RAX (120) /* + error_code */
10746 /* end of arguments */
10747
10748 /* cpu exception frame or undefined in case of fast syscall: */
10749 -#define RIP 128
10750 -#define CS 136
10751 -#define EFLAGS 144
10752 -#define RSP 152
10753 -#define SS 160
10754 +#define RIP (128)
10755 +#define CS (136)
10756 +#define EFLAGS (144)
10757 +#define RSP (152)
10758 +#define SS (160)
10759
10760 #define ARGOFFSET R11
10761 #define SWFRAME ORIG_RAX
10762 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10763 index 46fc474..b02b0f9 100644
10764 --- a/arch/x86/include/asm/checksum_32.h
10765 +++ b/arch/x86/include/asm/checksum_32.h
10766 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10767 int len, __wsum sum,
10768 int *src_err_ptr, int *dst_err_ptr);
10769
10770 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10771 + int len, __wsum sum,
10772 + int *src_err_ptr, int *dst_err_ptr);
10773 +
10774 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10775 + int len, __wsum sum,
10776 + int *src_err_ptr, int *dst_err_ptr);
10777 +
10778 /*
10779 * Note: when you get a NULL pointer exception here this means someone
10780 * passed in an incorrect kernel address to one of these functions.
10781 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10782 int *err_ptr)
10783 {
10784 might_sleep();
10785 - return csum_partial_copy_generic((__force void *)src, dst,
10786 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
10787 len, sum, err_ptr, NULL);
10788 }
10789
10790 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10791 {
10792 might_sleep();
10793 if (access_ok(VERIFY_WRITE, dst, len))
10794 - return csum_partial_copy_generic(src, (__force void *)dst,
10795 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10796 len, sum, NULL, err_ptr);
10797
10798 if (len)
10799 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10800 index 617bd56..7b047a1 100644
10801 --- a/arch/x86/include/asm/desc.h
10802 +++ b/arch/x86/include/asm/desc.h
10803 @@ -4,6 +4,7 @@
10804 #include <asm/desc_defs.h>
10805 #include <asm/ldt.h>
10806 #include <asm/mmu.h>
10807 +#include <asm/pgtable.h>
10808 #include <linux/smp.h>
10809
10810 static inline void fill_ldt(struct desc_struct *desc,
10811 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
10812 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
10813 desc->type = (info->read_exec_only ^ 1) << 1;
10814 desc->type |= info->contents << 2;
10815 + desc->type |= info->seg_not_present ^ 1;
10816 desc->s = 1;
10817 desc->dpl = 0x3;
10818 desc->p = info->seg_not_present ^ 1;
10819 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
10820 }
10821
10822 extern struct desc_ptr idt_descr;
10823 -extern gate_desc idt_table[];
10824 -
10825 -struct gdt_page {
10826 - struct desc_struct gdt[GDT_ENTRIES];
10827 -} __attribute__((aligned(PAGE_SIZE)));
10828 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10829 +extern gate_desc idt_table[256];
10830
10831 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10832 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10833 {
10834 - return per_cpu(gdt_page, cpu).gdt;
10835 + return cpu_gdt_table[cpu];
10836 }
10837
10838 #ifdef CONFIG_X86_64
10839 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10840 unsigned long base, unsigned dpl, unsigned flags,
10841 unsigned short seg)
10842 {
10843 - gate->a = (seg << 16) | (base & 0xffff);
10844 - gate->b = (base & 0xffff0000) |
10845 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10846 + gate->gate.offset_low = base;
10847 + gate->gate.seg = seg;
10848 + gate->gate.reserved = 0;
10849 + gate->gate.type = type;
10850 + gate->gate.s = 0;
10851 + gate->gate.dpl = dpl;
10852 + gate->gate.p = 1;
10853 + gate->gate.offset_high = base >> 16;
10854 }
10855
10856 #endif
10857 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10858 static inline void native_write_idt_entry(gate_desc *idt, int entry,
10859 const gate_desc *gate)
10860 {
10861 + pax_open_kernel();
10862 memcpy(&idt[entry], gate, sizeof(*gate));
10863 + pax_close_kernel();
10864 }
10865
10866 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
10867 const void *desc)
10868 {
10869 + pax_open_kernel();
10870 memcpy(&ldt[entry], desc, 8);
10871 + pax_close_kernel();
10872 }
10873
10874 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
10875 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
10876 size = sizeof(struct desc_struct);
10877 break;
10878 }
10879 +
10880 + pax_open_kernel();
10881 memcpy(&gdt[entry], desc, size);
10882 + pax_close_kernel();
10883 }
10884
10885 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10886 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10887
10888 static inline void native_load_tr_desc(void)
10889 {
10890 + pax_open_kernel();
10891 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10892 + pax_close_kernel();
10893 }
10894
10895 static inline void native_load_gdt(const struct desc_ptr *dtr)
10896 @@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10897 unsigned int i;
10898 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10899
10900 + pax_open_kernel();
10901 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10902 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10903 + pax_close_kernel();
10904 }
10905
10906 #define _LDT_empty(info) \
10907 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10908 desc->limit = (limit >> 16) & 0xf;
10909 }
10910
10911 -static inline void _set_gate(int gate, unsigned type, void *addr,
10912 +static inline void _set_gate(int gate, unsigned type, const void *addr,
10913 unsigned dpl, unsigned ist, unsigned seg)
10914 {
10915 gate_desc s;
10916 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10917 * Pentium F0 0F bugfix can have resulted in the mapped
10918 * IDT being write-protected.
10919 */
10920 -static inline void set_intr_gate(unsigned int n, void *addr)
10921 +static inline void set_intr_gate(unsigned int n, const void *addr)
10922 {
10923 BUG_ON((unsigned)n > 0xFF);
10924 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10925 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10926 /*
10927 * This routine sets up an interrupt gate at directory privilege level 3.
10928 */
10929 -static inline void set_system_intr_gate(unsigned int n, void *addr)
10930 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
10931 {
10932 BUG_ON((unsigned)n > 0xFF);
10933 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10934 }
10935
10936 -static inline void set_system_trap_gate(unsigned int n, void *addr)
10937 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
10938 {
10939 BUG_ON((unsigned)n > 0xFF);
10940 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10941 }
10942
10943 -static inline void set_trap_gate(unsigned int n, void *addr)
10944 +static inline void set_trap_gate(unsigned int n, const void *addr)
10945 {
10946 BUG_ON((unsigned)n > 0xFF);
10947 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10948 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10949 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10950 {
10951 BUG_ON((unsigned)n > 0xFF);
10952 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10953 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10954 }
10955
10956 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10957 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10958 {
10959 BUG_ON((unsigned)n > 0xFF);
10960 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10961 }
10962
10963 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10964 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10965 {
10966 BUG_ON((unsigned)n > 0xFF);
10967 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10968 }
10969
10970 +#ifdef CONFIG_X86_32
10971 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10972 +{
10973 + struct desc_struct d;
10974 +
10975 + if (likely(limit))
10976 + limit = (limit - 1UL) >> PAGE_SHIFT;
10977 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
10978 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10979 +}
10980 +#endif
10981 +
10982 #endif /* _ASM_X86_DESC_H */
10983 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10984 index 9d66848..6b4a691 100644
10985 --- a/arch/x86/include/asm/desc_defs.h
10986 +++ b/arch/x86/include/asm/desc_defs.h
10987 @@ -31,6 +31,12 @@ struct desc_struct {
10988 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10989 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10990 };
10991 + struct {
10992 + u16 offset_low;
10993 + u16 seg;
10994 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10995 + unsigned offset_high: 16;
10996 + } gate;
10997 };
10998 } __attribute__((packed));
10999
11000 diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
11001 index cee34e9..a7c3fa2 100644
11002 --- a/arch/x86/include/asm/device.h
11003 +++ b/arch/x86/include/asm/device.h
11004 @@ -6,7 +6,7 @@ struct dev_archdata {
11005 void *acpi_handle;
11006 #endif
11007 #ifdef CONFIG_X86_64
11008 -struct dma_map_ops *dma_ops;
11009 + const struct dma_map_ops *dma_ops;
11010 #endif
11011 #ifdef CONFIG_DMAR
11012 void *iommu; /* hook for IOMMU specific extension */
11013 diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
11014 index 6a25d5d..786b202 100644
11015 --- a/arch/x86/include/asm/dma-mapping.h
11016 +++ b/arch/x86/include/asm/dma-mapping.h
11017 @@ -25,9 +25,9 @@ extern int iommu_merge;
11018 extern struct device x86_dma_fallback_dev;
11019 extern int panic_on_overflow;
11020
11021 -extern struct dma_map_ops *dma_ops;
11022 +extern const struct dma_map_ops *dma_ops;
11023
11024 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
11025 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
11026 {
11027 #ifdef CONFIG_X86_32
11028 return dma_ops;
11029 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
11030 /* Make sure we keep the same behaviour */
11031 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
11032 {
11033 - struct dma_map_ops *ops = get_dma_ops(dev);
11034 + const struct dma_map_ops *ops = get_dma_ops(dev);
11035 if (ops->mapping_error)
11036 return ops->mapping_error(dev, dma_addr);
11037
11038 @@ -122,7 +122,7 @@ static inline void *
11039 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
11040 gfp_t gfp)
11041 {
11042 - struct dma_map_ops *ops = get_dma_ops(dev);
11043 + const struct dma_map_ops *ops = get_dma_ops(dev);
11044 void *memory;
11045
11046 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
11047 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
11048 static inline void dma_free_coherent(struct device *dev, size_t size,
11049 void *vaddr, dma_addr_t bus)
11050 {
11051 - struct dma_map_ops *ops = get_dma_ops(dev);
11052 + const struct dma_map_ops *ops = get_dma_ops(dev);
11053
11054 WARN_ON(irqs_disabled()); /* for portability */
11055
11056 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
11057 index 40b4e61..40d8133 100644
11058 --- a/arch/x86/include/asm/e820.h
11059 +++ b/arch/x86/include/asm/e820.h
11060 @@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
11061 #define ISA_END_ADDRESS 0x100000
11062 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
11063
11064 -#define BIOS_BEGIN 0x000a0000
11065 +#define BIOS_BEGIN 0x000c0000
11066 #define BIOS_END 0x00100000
11067
11068 #ifdef __KERNEL__
11069 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
11070 index 8ac9d9a..0a6c96e 100644
11071 --- a/arch/x86/include/asm/elf.h
11072 +++ b/arch/x86/include/asm/elf.h
11073 @@ -257,7 +257,25 @@ extern int force_personality32;
11074 the loader. We need to make sure that it is out of the way of the program
11075 that it will "exec", and that there is sufficient room for the brk. */
11076
11077 +#ifdef CONFIG_PAX_SEGMEXEC
11078 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
11079 +#else
11080 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
11081 +#endif
11082 +
11083 +#ifdef CONFIG_PAX_ASLR
11084 +#ifdef CONFIG_X86_32
11085 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
11086 +
11087 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11088 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11089 +#else
11090 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
11091 +
11092 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11093 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11094 +#endif
11095 +#endif
11096
11097 /* This yields a mask that user programs can use to figure out what
11098 instruction set this CPU supports. This could be done in user space,
11099 @@ -310,9 +328,7 @@ do { \
11100
11101 #define ARCH_DLINFO \
11102 do { \
11103 - if (vdso_enabled) \
11104 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
11105 - (unsigned long)current->mm->context.vdso); \
11106 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
11107 } while (0)
11108
11109 #define AT_SYSINFO 32
11110 @@ -323,7 +339,7 @@ do { \
11111
11112 #endif /* !CONFIG_X86_32 */
11113
11114 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
11115 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
11116
11117 #define VDSO_ENTRY \
11118 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
11119 @@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
11120 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
11121 #define compat_arch_setup_additional_pages syscall32_setup_pages
11122
11123 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
11124 -#define arch_randomize_brk arch_randomize_brk
11125 -
11126 #endif /* _ASM_X86_ELF_H */
11127 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
11128 index cc70c1c..d96d011 100644
11129 --- a/arch/x86/include/asm/emergency-restart.h
11130 +++ b/arch/x86/include/asm/emergency-restart.h
11131 @@ -15,6 +15,6 @@ enum reboot_type {
11132
11133 extern enum reboot_type reboot_type;
11134
11135 -extern void machine_emergency_restart(void);
11136 +extern void machine_emergency_restart(void) __noreturn;
11137
11138 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
11139 diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
11140 index dbe82a5..c6d8a00 100644
11141 --- a/arch/x86/include/asm/floppy.h
11142 +++ b/arch/x86/include/asm/floppy.h
11143 @@ -157,6 +157,7 @@ static unsigned long dma_mem_alloc(unsigned long size)
11144 }
11145
11146
11147 +static unsigned long vdma_mem_alloc(unsigned long size) __size_overflow(1);
11148 static unsigned long vdma_mem_alloc(unsigned long size)
11149 {
11150 return (unsigned long)vmalloc(size);
11151 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
11152 index 1f11ce4..7caabd1 100644
11153 --- a/arch/x86/include/asm/futex.h
11154 +++ b/arch/x86/include/asm/futex.h
11155 @@ -12,16 +12,18 @@
11156 #include <asm/system.h>
11157
11158 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
11159 + typecheck(u32 __user *, uaddr); \
11160 asm volatile("1:\t" insn "\n" \
11161 "2:\t.section .fixup,\"ax\"\n" \
11162 "3:\tmov\t%3, %1\n" \
11163 "\tjmp\t2b\n" \
11164 "\t.previous\n" \
11165 _ASM_EXTABLE(1b, 3b) \
11166 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
11167 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
11168 : "i" (-EFAULT), "0" (oparg), "1" (0))
11169
11170 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
11171 + typecheck(u32 __user *, uaddr); \
11172 asm volatile("1:\tmovl %2, %0\n" \
11173 "\tmovl\t%0, %3\n" \
11174 "\t" insn "\n" \
11175 @@ -34,10 +36,10 @@
11176 _ASM_EXTABLE(1b, 4b) \
11177 _ASM_EXTABLE(2b, 4b) \
11178 : "=&a" (oldval), "=&r" (ret), \
11179 - "+m" (*uaddr), "=&r" (tem) \
11180 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
11181 : "r" (oparg), "i" (-EFAULT), "1" (0))
11182
11183 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
11184 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
11185 {
11186 int op = (encoded_op >> 28) & 7;
11187 int cmp = (encoded_op >> 24) & 15;
11188 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
11189
11190 switch (op) {
11191 case FUTEX_OP_SET:
11192 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
11193 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
11194 break;
11195 case FUTEX_OP_ADD:
11196 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
11197 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
11198 uaddr, oparg);
11199 break;
11200 case FUTEX_OP_OR:
11201 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
11202 return ret;
11203 }
11204
11205 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
11206 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
11207 int newval)
11208 {
11209
11210 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
11211 return -ENOSYS;
11212 #endif
11213
11214 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
11215 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
11216 return -EFAULT;
11217
11218 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
11219 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
11220 "2:\t.section .fixup, \"ax\"\n"
11221 "3:\tmov %2, %0\n"
11222 "\tjmp 2b\n"
11223 "\t.previous\n"
11224 _ASM_EXTABLE(1b, 3b)
11225 - : "=a" (oldval), "+m" (*uaddr)
11226 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
11227 : "i" (-EFAULT), "r" (newval), "0" (oldval)
11228 : "memory"
11229 );
11230 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
11231 index ba180d9..3bad351 100644
11232 --- a/arch/x86/include/asm/hw_irq.h
11233 +++ b/arch/x86/include/asm/hw_irq.h
11234 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
11235 extern void enable_IO_APIC(void);
11236
11237 /* Statistics */
11238 -extern atomic_t irq_err_count;
11239 -extern atomic_t irq_mis_count;
11240 +extern atomic_unchecked_t irq_err_count;
11241 +extern atomic_unchecked_t irq_mis_count;
11242
11243 /* EISA */
11244 extern void eisa_set_level_irq(unsigned int irq);
11245 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
11246 index 0b20bbb..4cb1396 100644
11247 --- a/arch/x86/include/asm/i387.h
11248 +++ b/arch/x86/include/asm/i387.h
11249 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
11250 {
11251 int err;
11252
11253 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11254 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
11255 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
11256 +#endif
11257 +
11258 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
11259 "2:\n"
11260 ".section .fixup,\"ax\"\n"
11261 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
11262 {
11263 int err;
11264
11265 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11266 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
11267 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
11268 +#endif
11269 +
11270 asm volatile("1: rex64/fxsave (%[fx])\n\t"
11271 "2:\n"
11272 ".section .fixup,\"ax\"\n"
11273 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
11274 }
11275
11276 /* We need a safe address that is cheap to find and that is already
11277 - in L1 during context switch. The best choices are unfortunately
11278 - different for UP and SMP */
11279 -#ifdef CONFIG_SMP
11280 -#define safe_address (__per_cpu_offset[0])
11281 -#else
11282 -#define safe_address (kstat_cpu(0).cpustat.user)
11283 -#endif
11284 + in L1 during context switch. */
11285 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
11286
11287 /*
11288 * These must be called with preempt disabled
11289 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
11290 struct thread_info *me = current_thread_info();
11291 preempt_disable();
11292 if (me->status & TS_USEDFPU)
11293 - __save_init_fpu(me->task);
11294 + __save_init_fpu(current);
11295 else
11296 clts();
11297 }
11298 diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
11299 index a299900..15c5410 100644
11300 --- a/arch/x86/include/asm/io_32.h
11301 +++ b/arch/x86/include/asm/io_32.h
11302 @@ -3,6 +3,7 @@
11303
11304 #include <linux/string.h>
11305 #include <linux/compiler.h>
11306 +#include <asm/processor.h>
11307
11308 /*
11309 * This file contains the definitions for the x86 IO instructions
11310 @@ -42,6 +43,17 @@
11311
11312 #ifdef __KERNEL__
11313
11314 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
11315 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
11316 +{
11317 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11318 +}
11319 +
11320 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
11321 +{
11322 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11323 +}
11324 +
11325 #include <asm-generic/iomap.h>
11326
11327 #include <linux/vmalloc.h>
11328 diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
11329 index 2440678..c158b88 100644
11330 --- a/arch/x86/include/asm/io_64.h
11331 +++ b/arch/x86/include/asm/io_64.h
11332 @@ -140,6 +140,17 @@ __OUTS(l)
11333
11334 #include <linux/vmalloc.h>
11335
11336 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
11337 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
11338 +{
11339 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11340 +}
11341 +
11342 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
11343 +{
11344 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11345 +}
11346 +
11347 #include <asm-generic/iomap.h>
11348
11349 void __memcpy_fromio(void *, unsigned long, unsigned);
11350 diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
11351 index fd6d21b..8b13915 100644
11352 --- a/arch/x86/include/asm/iommu.h
11353 +++ b/arch/x86/include/asm/iommu.h
11354 @@ -3,7 +3,7 @@
11355
11356 extern void pci_iommu_shutdown(void);
11357 extern void no_iommu_init(void);
11358 -extern struct dma_map_ops nommu_dma_ops;
11359 +extern const struct dma_map_ops nommu_dma_ops;
11360 extern int force_iommu, no_iommu;
11361 extern int iommu_detected;
11362 extern int iommu_pass_through;
11363 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
11364 index 9e2b952..557206e 100644
11365 --- a/arch/x86/include/asm/irqflags.h
11366 +++ b/arch/x86/include/asm/irqflags.h
11367 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
11368 sti; \
11369 sysexit
11370
11371 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
11372 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
11373 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
11374 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
11375 +
11376 #else
11377 #define INTERRUPT_RETURN iret
11378 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
11379 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
11380 index 4fe681d..bb6d40c 100644
11381 --- a/arch/x86/include/asm/kprobes.h
11382 +++ b/arch/x86/include/asm/kprobes.h
11383 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
11384 #define BREAKPOINT_INSTRUCTION 0xcc
11385 #define RELATIVEJUMP_INSTRUCTION 0xe9
11386 #define MAX_INSN_SIZE 16
11387 -#define MAX_STACK_SIZE 64
11388 -#define MIN_STACK_SIZE(ADDR) \
11389 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
11390 - THREAD_SIZE - (unsigned long)(ADDR))) \
11391 - ? (MAX_STACK_SIZE) \
11392 - : (((unsigned long)current_thread_info()) + \
11393 - THREAD_SIZE - (unsigned long)(ADDR)))
11394 +#define MAX_STACK_SIZE 64UL
11395 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
11396
11397 #define flush_insn_slot(p) do { } while (0)
11398
11399 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
11400 index 08bc2ff..acafd8f 100644
11401 --- a/arch/x86/include/asm/kvm_host.h
11402 +++ b/arch/x86/include/asm/kvm_host.h
11403 @@ -534,9 +534,9 @@ struct kvm_x86_ops {
11404 bool (*gb_page_enable)(void);
11405
11406 const struct trace_print_flags *exit_reasons_str;
11407 -};
11408 +} __do_const;
11409
11410 -extern struct kvm_x86_ops *kvm_x86_ops;
11411 +extern const struct kvm_x86_ops *kvm_x86_ops;
11412
11413 int kvm_mmu_module_init(void);
11414 void kvm_mmu_module_exit(void);
11415 @@ -558,9 +558,9 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
11416 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
11417
11418 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
11419 - const void *val, int bytes);
11420 + const void *val, int bytes) __size_overflow(2);
11421 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
11422 - gpa_t addr, unsigned long *ret);
11423 + gpa_t addr, unsigned long *ret) __size_overflow(2,3);
11424 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
11425
11426 extern bool tdp_enabled;
11427 @@ -619,7 +619,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
11428 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
11429
11430 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
11431 -int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
11432 +int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) __size_overflow(3);
11433
11434 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
11435 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
11436 @@ -643,7 +643,7 @@ unsigned long segment_base(u16 selector);
11437 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
11438 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
11439 const u8 *new, int bytes,
11440 - bool guest_initiated);
11441 + bool guest_initiated) __size_overflow(2);
11442 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
11443 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
11444 int kvm_mmu_load(struct kvm_vcpu *vcpu);
11445 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
11446 index 47b9b6f..815aaa1 100644
11447 --- a/arch/x86/include/asm/local.h
11448 +++ b/arch/x86/include/asm/local.h
11449 @@ -18,26 +18,58 @@ typedef struct {
11450
11451 static inline void local_inc(local_t *l)
11452 {
11453 - asm volatile(_ASM_INC "%0"
11454 + asm volatile(_ASM_INC "%0\n"
11455 +
11456 +#ifdef CONFIG_PAX_REFCOUNT
11457 + "jno 0f\n"
11458 + _ASM_DEC "%0\n"
11459 + "int $4\n0:\n"
11460 + _ASM_EXTABLE(0b, 0b)
11461 +#endif
11462 +
11463 : "+m" (l->a.counter));
11464 }
11465
11466 static inline void local_dec(local_t *l)
11467 {
11468 - asm volatile(_ASM_DEC "%0"
11469 + asm volatile(_ASM_DEC "%0\n"
11470 +
11471 +#ifdef CONFIG_PAX_REFCOUNT
11472 + "jno 0f\n"
11473 + _ASM_INC "%0\n"
11474 + "int $4\n0:\n"
11475 + _ASM_EXTABLE(0b, 0b)
11476 +#endif
11477 +
11478 : "+m" (l->a.counter));
11479 }
11480
11481 static inline void local_add(long i, local_t *l)
11482 {
11483 - asm volatile(_ASM_ADD "%1,%0"
11484 + asm volatile(_ASM_ADD "%1,%0\n"
11485 +
11486 +#ifdef CONFIG_PAX_REFCOUNT
11487 + "jno 0f\n"
11488 + _ASM_SUB "%1,%0\n"
11489 + "int $4\n0:\n"
11490 + _ASM_EXTABLE(0b, 0b)
11491 +#endif
11492 +
11493 : "+m" (l->a.counter)
11494 : "ir" (i));
11495 }
11496
11497 static inline void local_sub(long i, local_t *l)
11498 {
11499 - asm volatile(_ASM_SUB "%1,%0"
11500 + asm volatile(_ASM_SUB "%1,%0\n"
11501 +
11502 +#ifdef CONFIG_PAX_REFCOUNT
11503 + "jno 0f\n"
11504 + _ASM_ADD "%1,%0\n"
11505 + "int $4\n0:\n"
11506 + _ASM_EXTABLE(0b, 0b)
11507 +#endif
11508 +
11509 : "+m" (l->a.counter)
11510 : "ir" (i));
11511 }
11512 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
11513 {
11514 unsigned char c;
11515
11516 - asm volatile(_ASM_SUB "%2,%0; sete %1"
11517 + asm volatile(_ASM_SUB "%2,%0\n"
11518 +
11519 +#ifdef CONFIG_PAX_REFCOUNT
11520 + "jno 0f\n"
11521 + _ASM_ADD "%2,%0\n"
11522 + "int $4\n0:\n"
11523 + _ASM_EXTABLE(0b, 0b)
11524 +#endif
11525 +
11526 + "sete %1\n"
11527 : "+m" (l->a.counter), "=qm" (c)
11528 : "ir" (i) : "memory");
11529 return c;
11530 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
11531 {
11532 unsigned char c;
11533
11534 - asm volatile(_ASM_DEC "%0; sete %1"
11535 + asm volatile(_ASM_DEC "%0\n"
11536 +
11537 +#ifdef CONFIG_PAX_REFCOUNT
11538 + "jno 0f\n"
11539 + _ASM_INC "%0\n"
11540 + "int $4\n0:\n"
11541 + _ASM_EXTABLE(0b, 0b)
11542 +#endif
11543 +
11544 + "sete %1\n"
11545 : "+m" (l->a.counter), "=qm" (c)
11546 : : "memory");
11547 return c != 0;
11548 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
11549 {
11550 unsigned char c;
11551
11552 - asm volatile(_ASM_INC "%0; sete %1"
11553 + asm volatile(_ASM_INC "%0\n"
11554 +
11555 +#ifdef CONFIG_PAX_REFCOUNT
11556 + "jno 0f\n"
11557 + _ASM_DEC "%0\n"
11558 + "int $4\n0:\n"
11559 + _ASM_EXTABLE(0b, 0b)
11560 +#endif
11561 +
11562 + "sete %1\n"
11563 : "+m" (l->a.counter), "=qm" (c)
11564 : : "memory");
11565 return c != 0;
11566 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
11567 {
11568 unsigned char c;
11569
11570 - asm volatile(_ASM_ADD "%2,%0; sets %1"
11571 + asm volatile(_ASM_ADD "%2,%0\n"
11572 +
11573 +#ifdef CONFIG_PAX_REFCOUNT
11574 + "jno 0f\n"
11575 + _ASM_SUB "%2,%0\n"
11576 + "int $4\n0:\n"
11577 + _ASM_EXTABLE(0b, 0b)
11578 +#endif
11579 +
11580 + "sets %1\n"
11581 : "+m" (l->a.counter), "=qm" (c)
11582 : "ir" (i) : "memory");
11583 return c;
11584 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
11585 #endif
11586 /* Modern 486+ processor */
11587 __i = i;
11588 - asm volatile(_ASM_XADD "%0, %1;"
11589 + asm volatile(_ASM_XADD "%0, %1\n"
11590 +
11591 +#ifdef CONFIG_PAX_REFCOUNT
11592 + "jno 0f\n"
11593 + _ASM_MOV "%0,%1\n"
11594 + "int $4\n0:\n"
11595 + _ASM_EXTABLE(0b, 0b)
11596 +#endif
11597 +
11598 : "+r" (i), "+m" (l->a.counter)
11599 : : "memory");
11600 return i + __i;
11601 diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
11602 index ef51b50..514ba37 100644
11603 --- a/arch/x86/include/asm/microcode.h
11604 +++ b/arch/x86/include/asm/microcode.h
11605 @@ -12,13 +12,13 @@ struct device;
11606 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
11607
11608 struct microcode_ops {
11609 - enum ucode_state (*request_microcode_user) (int cpu,
11610 + enum ucode_state (* const request_microcode_user) (int cpu,
11611 const void __user *buf, size_t size);
11612
11613 - enum ucode_state (*request_microcode_fw) (int cpu,
11614 + enum ucode_state (* const request_microcode_fw) (int cpu,
11615 struct device *device);
11616
11617 - void (*microcode_fini_cpu) (int cpu);
11618 + void (* const microcode_fini_cpu) (int cpu);
11619
11620 /*
11621 * The generic 'microcode_core' part guarantees that
11622 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
11623 extern struct ucode_cpu_info ucode_cpu_info[];
11624
11625 #ifdef CONFIG_MICROCODE_INTEL
11626 -extern struct microcode_ops * __init init_intel_microcode(void);
11627 +extern const struct microcode_ops * __init init_intel_microcode(void);
11628 #else
11629 -static inline struct microcode_ops * __init init_intel_microcode(void)
11630 +static inline const struct microcode_ops * __init init_intel_microcode(void)
11631 {
11632 return NULL;
11633 }
11634 #endif /* CONFIG_MICROCODE_INTEL */
11635
11636 #ifdef CONFIG_MICROCODE_AMD
11637 -extern struct microcode_ops * __init init_amd_microcode(void);
11638 +extern const struct microcode_ops * __init init_amd_microcode(void);
11639 #else
11640 -static inline struct microcode_ops * __init init_amd_microcode(void)
11641 +static inline const struct microcode_ops * __init init_amd_microcode(void)
11642 {
11643 return NULL;
11644 }
11645 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
11646 index 593e51d..fa69c9a 100644
11647 --- a/arch/x86/include/asm/mman.h
11648 +++ b/arch/x86/include/asm/mman.h
11649 @@ -5,4 +5,14 @@
11650
11651 #include <asm-generic/mman.h>
11652
11653 +#ifdef __KERNEL__
11654 +#ifndef __ASSEMBLY__
11655 +#ifdef CONFIG_X86_32
11656 +#define arch_mmap_check i386_mmap_check
11657 +int i386_mmap_check(unsigned long addr, unsigned long len,
11658 + unsigned long flags);
11659 +#endif
11660 +#endif
11661 +#endif
11662 +
11663 #endif /* _ASM_X86_MMAN_H */
11664 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
11665 index 80a1dee..239c67d 100644
11666 --- a/arch/x86/include/asm/mmu.h
11667 +++ b/arch/x86/include/asm/mmu.h
11668 @@ -9,10 +9,23 @@
11669 * we put the segment information here.
11670 */
11671 typedef struct {
11672 - void *ldt;
11673 + struct desc_struct *ldt;
11674 int size;
11675 struct mutex lock;
11676 - void *vdso;
11677 + unsigned long vdso;
11678 +
11679 +#ifdef CONFIG_X86_32
11680 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
11681 + unsigned long user_cs_base;
11682 + unsigned long user_cs_limit;
11683 +
11684 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11685 + cpumask_t cpu_user_cs_mask;
11686 +#endif
11687 +
11688 +#endif
11689 +#endif
11690 +
11691 } mm_context_t;
11692
11693 #ifdef CONFIG_SMP
11694 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
11695 index 8b5393e..8143173 100644
11696 --- a/arch/x86/include/asm/mmu_context.h
11697 +++ b/arch/x86/include/asm/mmu_context.h
11698 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
11699
11700 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
11701 {
11702 +
11703 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11704 + unsigned int i;
11705 + pgd_t *pgd;
11706 +
11707 + pax_open_kernel();
11708 + pgd = get_cpu_pgd(smp_processor_id());
11709 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
11710 + set_pgd_batched(pgd+i, native_make_pgd(0));
11711 + pax_close_kernel();
11712 +#endif
11713 +
11714 #ifdef CONFIG_SMP
11715 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
11716 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
11717 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11718 struct task_struct *tsk)
11719 {
11720 unsigned cpu = smp_processor_id();
11721 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
11722 + int tlbstate = TLBSTATE_OK;
11723 +#endif
11724
11725 if (likely(prev != next)) {
11726 #ifdef CONFIG_SMP
11727 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11728 + tlbstate = percpu_read(cpu_tlbstate.state);
11729 +#endif
11730 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11731 percpu_write(cpu_tlbstate.active_mm, next);
11732 #endif
11733 cpumask_set_cpu(cpu, mm_cpumask(next));
11734
11735 /* Re-load page tables */
11736 +#ifdef CONFIG_PAX_PER_CPU_PGD
11737 + pax_open_kernel();
11738 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11739 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11740 + pax_close_kernel();
11741 + load_cr3(get_cpu_pgd(cpu));
11742 +#else
11743 load_cr3(next->pgd);
11744 +#endif
11745
11746 /* stop flush ipis for the previous mm */
11747 cpumask_clear_cpu(cpu, mm_cpumask(prev));
11748 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11749 */
11750 if (unlikely(prev->context.ldt != next->context.ldt))
11751 load_LDT_nolock(&next->context);
11752 - }
11753 +
11754 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11755 + if (!nx_enabled) {
11756 + smp_mb__before_clear_bit();
11757 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11758 + smp_mb__after_clear_bit();
11759 + cpu_set(cpu, next->context.cpu_user_cs_mask);
11760 + }
11761 +#endif
11762 +
11763 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11764 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
11765 + prev->context.user_cs_limit != next->context.user_cs_limit))
11766 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11767 #ifdef CONFIG_SMP
11768 + else if (unlikely(tlbstate != TLBSTATE_OK))
11769 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11770 +#endif
11771 +#endif
11772 +
11773 + }
11774 else {
11775 +
11776 +#ifdef CONFIG_PAX_PER_CPU_PGD
11777 + pax_open_kernel();
11778 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11779 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11780 + pax_close_kernel();
11781 + load_cr3(get_cpu_pgd(cpu));
11782 +#endif
11783 +
11784 +#ifdef CONFIG_SMP
11785 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11786 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
11787
11788 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11789 * tlb flush IPI delivery. We must reload CR3
11790 * to make sure to use no freed page tables.
11791 */
11792 +
11793 +#ifndef CONFIG_PAX_PER_CPU_PGD
11794 load_cr3(next->pgd);
11795 +#endif
11796 +
11797 load_LDT_nolock(&next->context);
11798 +
11799 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
11800 + if (!nx_enabled)
11801 + cpu_set(cpu, next->context.cpu_user_cs_mask);
11802 +#endif
11803 +
11804 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11805 +#ifdef CONFIG_PAX_PAGEEXEC
11806 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
11807 +#endif
11808 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11809 +#endif
11810 +
11811 }
11812 +#endif
11813 }
11814 -#endif
11815 }
11816
11817 #define activate_mm(prev, next) \
11818 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11819 index 3e2ce58..caaf478 100644
11820 --- a/arch/x86/include/asm/module.h
11821 +++ b/arch/x86/include/asm/module.h
11822 @@ -5,6 +5,7 @@
11823
11824 #ifdef CONFIG_X86_64
11825 /* X86_64 does not define MODULE_PROC_FAMILY */
11826 +#define MODULE_PROC_FAMILY ""
11827 #elif defined CONFIG_M386
11828 #define MODULE_PROC_FAMILY "386 "
11829 #elif defined CONFIG_M486
11830 @@ -59,13 +60,26 @@
11831 #error unknown processor family
11832 #endif
11833
11834 -#ifdef CONFIG_X86_32
11835 -# ifdef CONFIG_4KSTACKS
11836 -# define MODULE_STACKSIZE "4KSTACKS "
11837 -# else
11838 -# define MODULE_STACKSIZE ""
11839 -# endif
11840 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
11841 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
11842 +#define MODULE_STACKSIZE "4KSTACKS "
11843 +#else
11844 +#define MODULE_STACKSIZE ""
11845 #endif
11846
11847 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11848 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11849 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11850 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
11851 +#else
11852 +#define MODULE_PAX_KERNEXEC ""
11853 +#endif
11854 +
11855 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11856 +#define MODULE_PAX_UDEREF "UDEREF "
11857 +#else
11858 +#define MODULE_PAX_UDEREF ""
11859 +#endif
11860 +
11861 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11862 +
11863 #endif /* _ASM_X86_MODULE_H */
11864 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11865 index 7639dbf..e08a58c 100644
11866 --- a/arch/x86/include/asm/page_64_types.h
11867 +++ b/arch/x86/include/asm/page_64_types.h
11868 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11869
11870 /* duplicated to the one in bootmem.h */
11871 extern unsigned long max_pfn;
11872 -extern unsigned long phys_base;
11873 +extern const unsigned long phys_base;
11874
11875 extern unsigned long __phys_addr(unsigned long);
11876 #define __phys_reloc_hide(x) (x)
11877 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11878 index efb3899..ef30687 100644
11879 --- a/arch/x86/include/asm/paravirt.h
11880 +++ b/arch/x86/include/asm/paravirt.h
11881 @@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11882 val);
11883 }
11884
11885 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11886 +{
11887 + pgdval_t val = native_pgd_val(pgd);
11888 +
11889 + if (sizeof(pgdval_t) > sizeof(long))
11890 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11891 + val, (u64)val >> 32);
11892 + else
11893 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11894 + val);
11895 +}
11896 +
11897 static inline void pgd_clear(pgd_t *pgdp)
11898 {
11899 set_pgd(pgdp, __pgd(0));
11900 @@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11901 pv_mmu_ops.set_fixmap(idx, phys, flags);
11902 }
11903
11904 +#ifdef CONFIG_PAX_KERNEXEC
11905 +static inline unsigned long pax_open_kernel(void)
11906 +{
11907 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11908 +}
11909 +
11910 +static inline unsigned long pax_close_kernel(void)
11911 +{
11912 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11913 +}
11914 +#else
11915 +static inline unsigned long pax_open_kernel(void) { return 0; }
11916 +static inline unsigned long pax_close_kernel(void) { return 0; }
11917 +#endif
11918 +
11919 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11920
11921 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
11922 @@ -945,7 +972,7 @@ extern void default_banner(void);
11923
11924 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11925 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11926 -#define PARA_INDIRECT(addr) *%cs:addr
11927 +#define PARA_INDIRECT(addr) *%ss:addr
11928 #endif
11929
11930 #define INTERRUPT_RETURN \
11931 @@ -1022,6 +1049,21 @@ extern void default_banner(void);
11932 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11933 CLBR_NONE, \
11934 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11935 +
11936 +#define GET_CR0_INTO_RDI \
11937 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11938 + mov %rax,%rdi
11939 +
11940 +#define SET_RDI_INTO_CR0 \
11941 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11942 +
11943 +#define GET_CR3_INTO_RDI \
11944 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11945 + mov %rax,%rdi
11946 +
11947 +#define SET_RDI_INTO_CR3 \
11948 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11949 +
11950 #endif /* CONFIG_X86_32 */
11951
11952 #endif /* __ASSEMBLY__ */
11953 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11954 index 9357473..aeb2de5 100644
11955 --- a/arch/x86/include/asm/paravirt_types.h
11956 +++ b/arch/x86/include/asm/paravirt_types.h
11957 @@ -78,19 +78,19 @@ struct pv_init_ops {
11958 */
11959 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11960 unsigned long addr, unsigned len);
11961 -};
11962 +} __no_const;
11963
11964
11965 struct pv_lazy_ops {
11966 /* Set deferred update mode, used for batching operations. */
11967 void (*enter)(void);
11968 void (*leave)(void);
11969 -};
11970 +} __no_const;
11971
11972 struct pv_time_ops {
11973 unsigned long long (*sched_clock)(void);
11974 unsigned long (*get_tsc_khz)(void);
11975 -};
11976 +} __no_const;
11977
11978 struct pv_cpu_ops {
11979 /* hooks for various privileged instructions */
11980 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
11981
11982 void (*start_context_switch)(struct task_struct *prev);
11983 void (*end_context_switch)(struct task_struct *next);
11984 -};
11985 +} __no_const;
11986
11987 struct pv_irq_ops {
11988 /*
11989 @@ -217,7 +217,7 @@ struct pv_apic_ops {
11990 unsigned long start_eip,
11991 unsigned long start_esp);
11992 #endif
11993 -};
11994 +} __no_const;
11995
11996 struct pv_mmu_ops {
11997 unsigned long (*read_cr2)(void);
11998 @@ -301,6 +301,7 @@ struct pv_mmu_ops {
11999 struct paravirt_callee_save make_pud;
12000
12001 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
12002 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
12003 #endif /* PAGETABLE_LEVELS == 4 */
12004 #endif /* PAGETABLE_LEVELS >= 3 */
12005
12006 @@ -316,6 +317,12 @@ struct pv_mmu_ops {
12007 an mfn. We can tell which is which from the index. */
12008 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
12009 phys_addr_t phys, pgprot_t flags);
12010 +
12011 +#ifdef CONFIG_PAX_KERNEXEC
12012 + unsigned long (*pax_open_kernel)(void);
12013 + unsigned long (*pax_close_kernel)(void);
12014 +#endif
12015 +
12016 };
12017
12018 struct raw_spinlock;
12019 @@ -326,7 +333,7 @@ struct pv_lock_ops {
12020 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
12021 int (*spin_trylock)(struct raw_spinlock *lock);
12022 void (*spin_unlock)(struct raw_spinlock *lock);
12023 -};
12024 +} __no_const;
12025
12026 /* This contains all the paravirt structures: we get a convenient
12027 * number for each function using the offset which we use to indicate
12028 diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
12029 index b399988..3f47c38 100644
12030 --- a/arch/x86/include/asm/pci_x86.h
12031 +++ b/arch/x86/include/asm/pci_x86.h
12032 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
12033 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
12034
12035 struct pci_raw_ops {
12036 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
12037 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
12038 int reg, int len, u32 *val);
12039 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
12040 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
12041 int reg, int len, u32 val);
12042 };
12043
12044 -extern struct pci_raw_ops *raw_pci_ops;
12045 -extern struct pci_raw_ops *raw_pci_ext_ops;
12046 +extern const struct pci_raw_ops *raw_pci_ops;
12047 +extern const struct pci_raw_ops *raw_pci_ext_ops;
12048
12049 -extern struct pci_raw_ops pci_direct_conf1;
12050 +extern const struct pci_raw_ops pci_direct_conf1;
12051 extern bool port_cf9_safe;
12052
12053 /* arch_initcall level */
12054 diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
12055 index b65a36d..50345a4 100644
12056 --- a/arch/x86/include/asm/percpu.h
12057 +++ b/arch/x86/include/asm/percpu.h
12058 @@ -78,6 +78,7 @@ do { \
12059 if (0) { \
12060 T__ tmp__; \
12061 tmp__ = (val); \
12062 + (void)tmp__; \
12063 } \
12064 switch (sizeof(var)) { \
12065 case 1: \
12066 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
12067 index 271de94..ef944d6 100644
12068 --- a/arch/x86/include/asm/pgalloc.h
12069 +++ b/arch/x86/include/asm/pgalloc.h
12070 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
12071 pmd_t *pmd, pte_t *pte)
12072 {
12073 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12074 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
12075 +}
12076 +
12077 +static inline void pmd_populate_user(struct mm_struct *mm,
12078 + pmd_t *pmd, pte_t *pte)
12079 +{
12080 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12081 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
12082 }
12083
12084 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
12085 index 2334982..70bc412 100644
12086 --- a/arch/x86/include/asm/pgtable-2level.h
12087 +++ b/arch/x86/include/asm/pgtable-2level.h
12088 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
12089
12090 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12091 {
12092 + pax_open_kernel();
12093 *pmdp = pmd;
12094 + pax_close_kernel();
12095 }
12096
12097 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12098 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
12099 index 33927d2..ccde329 100644
12100 --- a/arch/x86/include/asm/pgtable-3level.h
12101 +++ b/arch/x86/include/asm/pgtable-3level.h
12102 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12103
12104 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12105 {
12106 + pax_open_kernel();
12107 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
12108 + pax_close_kernel();
12109 }
12110
12111 static inline void native_set_pud(pud_t *pudp, pud_t pud)
12112 {
12113 + pax_open_kernel();
12114 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
12115 + pax_close_kernel();
12116 }
12117
12118 /*
12119 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
12120 index af6fd36..867ff74 100644
12121 --- a/arch/x86/include/asm/pgtable.h
12122 +++ b/arch/x86/include/asm/pgtable.h
12123 @@ -39,6 +39,7 @@ extern struct list_head pgd_list;
12124
12125 #ifndef __PAGETABLE_PUD_FOLDED
12126 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
12127 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
12128 #define pgd_clear(pgd) native_pgd_clear(pgd)
12129 #endif
12130
12131 @@ -74,12 +75,51 @@ extern struct list_head pgd_list;
12132
12133 #define arch_end_context_switch(prev) do {} while(0)
12134
12135 +#define pax_open_kernel() native_pax_open_kernel()
12136 +#define pax_close_kernel() native_pax_close_kernel()
12137 #endif /* CONFIG_PARAVIRT */
12138
12139 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
12140 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
12141 +
12142 +#ifdef CONFIG_PAX_KERNEXEC
12143 +static inline unsigned long native_pax_open_kernel(void)
12144 +{
12145 + unsigned long cr0;
12146 +
12147 + preempt_disable();
12148 + barrier();
12149 + cr0 = read_cr0() ^ X86_CR0_WP;
12150 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
12151 + write_cr0(cr0);
12152 + return cr0 ^ X86_CR0_WP;
12153 +}
12154 +
12155 +static inline unsigned long native_pax_close_kernel(void)
12156 +{
12157 + unsigned long cr0;
12158 +
12159 + cr0 = read_cr0() ^ X86_CR0_WP;
12160 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
12161 + write_cr0(cr0);
12162 + barrier();
12163 + preempt_enable_no_resched();
12164 + return cr0 ^ X86_CR0_WP;
12165 +}
12166 +#else
12167 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
12168 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
12169 +#endif
12170 +
12171 /*
12172 * The following only work if pte_present() is true.
12173 * Undefined behaviour if not..
12174 */
12175 +static inline int pte_user(pte_t pte)
12176 +{
12177 + return pte_val(pte) & _PAGE_USER;
12178 +}
12179 +
12180 static inline int pte_dirty(pte_t pte)
12181 {
12182 return pte_flags(pte) & _PAGE_DIRTY;
12183 @@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
12184 return pte_clear_flags(pte, _PAGE_RW);
12185 }
12186
12187 +static inline pte_t pte_mkread(pte_t pte)
12188 +{
12189 + return __pte(pte_val(pte) | _PAGE_USER);
12190 +}
12191 +
12192 static inline pte_t pte_mkexec(pte_t pte)
12193 {
12194 - return pte_clear_flags(pte, _PAGE_NX);
12195 +#ifdef CONFIG_X86_PAE
12196 + if (__supported_pte_mask & _PAGE_NX)
12197 + return pte_clear_flags(pte, _PAGE_NX);
12198 + else
12199 +#endif
12200 + return pte_set_flags(pte, _PAGE_USER);
12201 +}
12202 +
12203 +static inline pte_t pte_exprotect(pte_t pte)
12204 +{
12205 +#ifdef CONFIG_X86_PAE
12206 + if (__supported_pte_mask & _PAGE_NX)
12207 + return pte_set_flags(pte, _PAGE_NX);
12208 + else
12209 +#endif
12210 + return pte_clear_flags(pte, _PAGE_USER);
12211 }
12212
12213 static inline pte_t pte_mkdirty(pte_t pte)
12214 @@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
12215 #endif
12216
12217 #ifndef __ASSEMBLY__
12218 +
12219 +#ifdef CONFIG_PAX_PER_CPU_PGD
12220 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
12221 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
12222 +{
12223 + return cpu_pgd[cpu];
12224 +}
12225 +#endif
12226 +
12227 #include <linux/mm_types.h>
12228
12229 static inline int pte_none(pte_t pte)
12230 @@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
12231
12232 static inline int pgd_bad(pgd_t pgd)
12233 {
12234 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
12235 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
12236 }
12237
12238 static inline int pgd_none(pgd_t pgd)
12239 @@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
12240 * pgd_offset() returns a (pgd_t *)
12241 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
12242 */
12243 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
12244 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
12245 +
12246 +#ifdef CONFIG_PAX_PER_CPU_PGD
12247 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
12248 +#endif
12249 +
12250 /*
12251 * a shortcut which implies the use of the kernel's pgd, instead
12252 * of a process's
12253 @@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
12254 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
12255 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
12256
12257 +#ifdef CONFIG_X86_32
12258 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
12259 +#else
12260 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
12261 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
12262 +
12263 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12264 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
12265 +#else
12266 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
12267 +#endif
12268 +
12269 +#endif
12270 +
12271 #ifndef __ASSEMBLY__
12272
12273 extern int direct_gbpages;
12274 @@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
12275 * dst and src can be on the same page, but the range must not overlap,
12276 * and must not cross a page boundary.
12277 */
12278 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
12279 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
12280 {
12281 - memcpy(dst, src, count * sizeof(pgd_t));
12282 + pax_open_kernel();
12283 + while (count--)
12284 + *dst++ = *src++;
12285 + pax_close_kernel();
12286 }
12287
12288 +#ifdef CONFIG_PAX_PER_CPU_PGD
12289 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
12290 +#endif
12291 +
12292 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12293 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
12294 +#else
12295 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
12296 +#endif
12297
12298 #include <asm-generic/pgtable.h>
12299 #endif /* __ASSEMBLY__ */
12300 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
12301 index 750f1bf..971e8394 100644
12302 --- a/arch/x86/include/asm/pgtable_32.h
12303 +++ b/arch/x86/include/asm/pgtable_32.h
12304 @@ -26,9 +26,6 @@
12305 struct mm_struct;
12306 struct vm_area_struct;
12307
12308 -extern pgd_t swapper_pg_dir[1024];
12309 -extern pgd_t trampoline_pg_dir[1024];
12310 -
12311 static inline void pgtable_cache_init(void) { }
12312 static inline void check_pgt_cache(void) { }
12313 void paging_init(void);
12314 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12315 # include <asm/pgtable-2level.h>
12316 #endif
12317
12318 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
12319 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
12320 +#ifdef CONFIG_X86_PAE
12321 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
12322 +#endif
12323 +
12324 #if defined(CONFIG_HIGHPTE)
12325 #define __KM_PTE \
12326 (in_nmi() ? KM_NMI_PTE : \
12327 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12328 /* Clear a kernel PTE and flush it from the TLB */
12329 #define kpte_clear_flush(ptep, vaddr) \
12330 do { \
12331 + pax_open_kernel(); \
12332 pte_clear(&init_mm, (vaddr), (ptep)); \
12333 + pax_close_kernel(); \
12334 __flush_tlb_one((vaddr)); \
12335 } while (0)
12336
12337 @@ -85,6 +90,9 @@ do { \
12338
12339 #endif /* !__ASSEMBLY__ */
12340
12341 +#define HAVE_ARCH_UNMAPPED_AREA
12342 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
12343 +
12344 /*
12345 * kern_addr_valid() is (1) for FLATMEM and (0) for
12346 * SPARSEMEM and DISCONTIGMEM
12347 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
12348 index 5e67c15..12d5c47 100644
12349 --- a/arch/x86/include/asm/pgtable_32_types.h
12350 +++ b/arch/x86/include/asm/pgtable_32_types.h
12351 @@ -8,7 +8,7 @@
12352 */
12353 #ifdef CONFIG_X86_PAE
12354 # include <asm/pgtable-3level_types.h>
12355 -# define PMD_SIZE (1UL << PMD_SHIFT)
12356 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
12357 # define PMD_MASK (~(PMD_SIZE - 1))
12358 #else
12359 # include <asm/pgtable-2level_types.h>
12360 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
12361 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
12362 #endif
12363
12364 +#ifdef CONFIG_PAX_KERNEXEC
12365 +#ifndef __ASSEMBLY__
12366 +extern unsigned char MODULES_EXEC_VADDR[];
12367 +extern unsigned char MODULES_EXEC_END[];
12368 +#endif
12369 +#include <asm/boot.h>
12370 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
12371 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
12372 +#else
12373 +#define ktla_ktva(addr) (addr)
12374 +#define ktva_ktla(addr) (addr)
12375 +#endif
12376 +
12377 #define MODULES_VADDR VMALLOC_START
12378 #define MODULES_END VMALLOC_END
12379 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
12380 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
12381 index c57a301..6b414ff 100644
12382 --- a/arch/x86/include/asm/pgtable_64.h
12383 +++ b/arch/x86/include/asm/pgtable_64.h
12384 @@ -16,10 +16,14 @@
12385
12386 extern pud_t level3_kernel_pgt[512];
12387 extern pud_t level3_ident_pgt[512];
12388 +extern pud_t level3_vmalloc_start_pgt[512];
12389 +extern pud_t level3_vmalloc_end_pgt[512];
12390 +extern pud_t level3_vmemmap_pgt[512];
12391 +extern pud_t level2_vmemmap_pgt[512];
12392 extern pmd_t level2_kernel_pgt[512];
12393 extern pmd_t level2_fixmap_pgt[512];
12394 -extern pmd_t level2_ident_pgt[512];
12395 -extern pgd_t init_level4_pgt[];
12396 +extern pmd_t level2_ident_pgt[512*2];
12397 +extern pgd_t init_level4_pgt[512];
12398
12399 #define swapper_pg_dir init_level4_pgt
12400
12401 @@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
12402
12403 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12404 {
12405 + pax_open_kernel();
12406 *pmdp = pmd;
12407 + pax_close_kernel();
12408 }
12409
12410 static inline void native_pmd_clear(pmd_t *pmd)
12411 @@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
12412
12413 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
12414 {
12415 + pax_open_kernel();
12416 + *pgdp = pgd;
12417 + pax_close_kernel();
12418 +}
12419 +
12420 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
12421 +{
12422 *pgdp = pgd;
12423 }
12424
12425 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
12426 index 766ea16..5b96cb3 100644
12427 --- a/arch/x86/include/asm/pgtable_64_types.h
12428 +++ b/arch/x86/include/asm/pgtable_64_types.h
12429 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
12430 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
12431 #define MODULES_END _AC(0xffffffffff000000, UL)
12432 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
12433 +#define MODULES_EXEC_VADDR MODULES_VADDR
12434 +#define MODULES_EXEC_END MODULES_END
12435 +
12436 +#define ktla_ktva(addr) (addr)
12437 +#define ktva_ktla(addr) (addr)
12438
12439 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
12440 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
12441 index d1f4a76..2f46ba1 100644
12442 --- a/arch/x86/include/asm/pgtable_types.h
12443 +++ b/arch/x86/include/asm/pgtable_types.h
12444 @@ -16,12 +16,11 @@
12445 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
12446 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
12447 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
12448 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
12449 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
12450 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
12451 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
12452 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
12453 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
12454 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
12455 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
12456 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
12457
12458 /* If _PAGE_BIT_PRESENT is clear, we use these: */
12459 @@ -39,7 +38,6 @@
12460 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
12461 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
12462 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
12463 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
12464 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
12465 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
12466 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
12467 @@ -55,8 +53,10 @@
12468
12469 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
12470 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
12471 -#else
12472 +#elif defined(CONFIG_KMEMCHECK)
12473 #define _PAGE_NX (_AT(pteval_t, 0))
12474 +#else
12475 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
12476 #endif
12477
12478 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
12479 @@ -93,6 +93,9 @@
12480 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
12481 _PAGE_ACCESSED)
12482
12483 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
12484 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
12485 +
12486 #define __PAGE_KERNEL_EXEC \
12487 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
12488 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
12489 @@ -103,8 +106,8 @@
12490 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
12491 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
12492 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
12493 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
12494 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
12495 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
12496 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
12497 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
12498 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
12499 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
12500 @@ -163,8 +166,8 @@
12501 * bits are combined, this will alow user to access the high address mapped
12502 * VDSO in the presence of CONFIG_COMPAT_VDSO
12503 */
12504 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
12505 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
12506 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12507 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12508 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
12509 #endif
12510
12511 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
12512 {
12513 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
12514 }
12515 +#endif
12516
12517 +#if PAGETABLE_LEVELS == 3
12518 +#include <asm-generic/pgtable-nopud.h>
12519 +#endif
12520 +
12521 +#if PAGETABLE_LEVELS == 2
12522 +#include <asm-generic/pgtable-nopmd.h>
12523 +#endif
12524 +
12525 +#ifndef __ASSEMBLY__
12526 #if PAGETABLE_LEVELS > 3
12527 typedef struct { pudval_t pud; } pud_t;
12528
12529 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
12530 return pud.pud;
12531 }
12532 #else
12533 -#include <asm-generic/pgtable-nopud.h>
12534 -
12535 static inline pudval_t native_pud_val(pud_t pud)
12536 {
12537 return native_pgd_val(pud.pgd);
12538 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
12539 return pmd.pmd;
12540 }
12541 #else
12542 -#include <asm-generic/pgtable-nopmd.h>
12543 -
12544 static inline pmdval_t native_pmd_val(pmd_t pmd)
12545 {
12546 return native_pgd_val(pmd.pud.pgd);
12547 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
12548
12549 extern pteval_t __supported_pte_mask;
12550 extern void set_nx(void);
12551 +
12552 +#ifdef CONFIG_X86_32
12553 +#ifdef CONFIG_X86_PAE
12554 extern int nx_enabled;
12555 +#else
12556 +#define nx_enabled (0)
12557 +#endif
12558 +#else
12559 +#define nx_enabled (1)
12560 +#endif
12561
12562 #define pgprot_writecombine pgprot_writecombine
12563 extern pgprot_t pgprot_writecombine(pgprot_t prot);
12564 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
12565 index fa04dea..5f823fc 100644
12566 --- a/arch/x86/include/asm/processor.h
12567 +++ b/arch/x86/include/asm/processor.h
12568 @@ -272,7 +272,7 @@ struct tss_struct {
12569
12570 } ____cacheline_aligned;
12571
12572 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
12573 +extern struct tss_struct init_tss[NR_CPUS];
12574
12575 /*
12576 * Save the original ist values for checking stack pointers during debugging
12577 @@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
12578 */
12579 #define TASK_SIZE PAGE_OFFSET
12580 #define TASK_SIZE_MAX TASK_SIZE
12581 +
12582 +#ifdef CONFIG_PAX_SEGMEXEC
12583 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
12584 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
12585 +#else
12586 #define STACK_TOP TASK_SIZE
12587 -#define STACK_TOP_MAX STACK_TOP
12588 +#endif
12589 +
12590 +#define STACK_TOP_MAX TASK_SIZE
12591
12592 #define INIT_THREAD { \
12593 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
12594 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12595 .vm86_info = NULL, \
12596 .sysenter_cs = __KERNEL_CS, \
12597 .io_bitmap_ptr = NULL, \
12598 @@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
12599 */
12600 #define INIT_TSS { \
12601 .x86_tss = { \
12602 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
12603 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12604 .ss0 = __KERNEL_DS, \
12605 .ss1 = __KERNEL_CS, \
12606 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
12607 @@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
12608 extern unsigned long thread_saved_pc(struct task_struct *tsk);
12609
12610 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
12611 -#define KSTK_TOP(info) \
12612 -({ \
12613 - unsigned long *__ptr = (unsigned long *)(info); \
12614 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
12615 -})
12616 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
12617
12618 /*
12619 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
12620 @@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12621 #define task_pt_regs(task) \
12622 ({ \
12623 struct pt_regs *__regs__; \
12624 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
12625 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
12626 __regs__ - 1; \
12627 })
12628
12629 @@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12630 /*
12631 * User space process size. 47bits minus one guard page.
12632 */
12633 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
12634 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
12635
12636 /* This decides where the kernel will search for a free chunk of vm
12637 * space during mmap's.
12638 */
12639 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
12640 - 0xc0000000 : 0xFFFFe000)
12641 + 0xc0000000 : 0xFFFFf000)
12642
12643 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
12644 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
12645 @@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12646 #define STACK_TOP_MAX TASK_SIZE_MAX
12647
12648 #define INIT_THREAD { \
12649 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
12650 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
12651 }
12652
12653 #define INIT_TSS { \
12654 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
12655 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
12656 }
12657
12658 /*
12659 @@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
12660 */
12661 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
12662
12663 +#ifdef CONFIG_PAX_SEGMEXEC
12664 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
12665 +#endif
12666 +
12667 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
12668
12669 /* Get/set a process' ability to use the timestamp counter instruction */
12670 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
12671 index 0f0d908..f2e3da2 100644
12672 --- a/arch/x86/include/asm/ptrace.h
12673 +++ b/arch/x86/include/asm/ptrace.h
12674 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
12675 }
12676
12677 /*
12678 - * user_mode_vm(regs) determines whether a register set came from user mode.
12679 + * user_mode(regs) determines whether a register set came from user mode.
12680 * This is true if V8086 mode was enabled OR if the register set was from
12681 * protected mode with RPL-3 CS value. This tricky test checks that with
12682 * one comparison. Many places in the kernel can bypass this full check
12683 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
12684 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
12685 + * be used.
12686 */
12687 -static inline int user_mode(struct pt_regs *regs)
12688 +static inline int user_mode_novm(struct pt_regs *regs)
12689 {
12690 #ifdef CONFIG_X86_32
12691 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
12692 #else
12693 - return !!(regs->cs & 3);
12694 + return !!(regs->cs & SEGMENT_RPL_MASK);
12695 #endif
12696 }
12697
12698 -static inline int user_mode_vm(struct pt_regs *regs)
12699 +static inline int user_mode(struct pt_regs *regs)
12700 {
12701 #ifdef CONFIG_X86_32
12702 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
12703 USER_RPL;
12704 #else
12705 - return user_mode(regs);
12706 + return user_mode_novm(regs);
12707 #endif
12708 }
12709
12710 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
12711 index 562d4fd..6e39df1 100644
12712 --- a/arch/x86/include/asm/reboot.h
12713 +++ b/arch/x86/include/asm/reboot.h
12714 @@ -6,19 +6,19 @@
12715 struct pt_regs;
12716
12717 struct machine_ops {
12718 - void (*restart)(char *cmd);
12719 - void (*halt)(void);
12720 - void (*power_off)(void);
12721 + void (* __noreturn restart)(char *cmd);
12722 + void (* __noreturn halt)(void);
12723 + void (* __noreturn power_off)(void);
12724 void (*shutdown)(void);
12725 void (*crash_shutdown)(struct pt_regs *);
12726 - void (*emergency_restart)(void);
12727 -};
12728 + void (* __noreturn emergency_restart)(void);
12729 +} __no_const;
12730
12731 extern struct machine_ops machine_ops;
12732
12733 void native_machine_crash_shutdown(struct pt_regs *regs);
12734 void native_machine_shutdown(void);
12735 -void machine_real_restart(const unsigned char *code, int length);
12736 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
12737
12738 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
12739 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
12740 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
12741 index 606ede1..dbfff37 100644
12742 --- a/arch/x86/include/asm/rwsem.h
12743 +++ b/arch/x86/include/asm/rwsem.h
12744 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
12745 {
12746 asm volatile("# beginning down_read\n\t"
12747 LOCK_PREFIX _ASM_INC "(%1)\n\t"
12748 +
12749 +#ifdef CONFIG_PAX_REFCOUNT
12750 + "jno 0f\n"
12751 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
12752 + "int $4\n0:\n"
12753 + _ASM_EXTABLE(0b, 0b)
12754 +#endif
12755 +
12756 /* adds 0x00000001, returns the old value */
12757 " jns 1f\n"
12758 " call call_rwsem_down_read_failed\n"
12759 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
12760 "1:\n\t"
12761 " mov %1,%2\n\t"
12762 " add %3,%2\n\t"
12763 +
12764 +#ifdef CONFIG_PAX_REFCOUNT
12765 + "jno 0f\n"
12766 + "sub %3,%2\n"
12767 + "int $4\n0:\n"
12768 + _ASM_EXTABLE(0b, 0b)
12769 +#endif
12770 +
12771 " jle 2f\n\t"
12772 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12773 " jnz 1b\n\t"
12774 @@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12775 tmp = RWSEM_ACTIVE_WRITE_BIAS;
12776 asm volatile("# beginning down_write\n\t"
12777 LOCK_PREFIX " xadd %1,(%2)\n\t"
12778 +
12779 +#ifdef CONFIG_PAX_REFCOUNT
12780 + "jno 0f\n"
12781 + "mov %1,(%2)\n"
12782 + "int $4\n0:\n"
12783 + _ASM_EXTABLE(0b, 0b)
12784 +#endif
12785 +
12786 /* subtract 0x0000ffff, returns the old value */
12787 " test %1,%1\n\t"
12788 /* was the count 0 before? */
12789 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12790 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
12791 asm volatile("# beginning __up_read\n\t"
12792 LOCK_PREFIX " xadd %1,(%2)\n\t"
12793 +
12794 +#ifdef CONFIG_PAX_REFCOUNT
12795 + "jno 0f\n"
12796 + "mov %1,(%2)\n"
12797 + "int $4\n0:\n"
12798 + _ASM_EXTABLE(0b, 0b)
12799 +#endif
12800 +
12801 /* subtracts 1, returns the old value */
12802 " jns 1f\n\t"
12803 " call call_rwsem_wake\n"
12804 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12805 rwsem_count_t tmp;
12806 asm volatile("# beginning __up_write\n\t"
12807 LOCK_PREFIX " xadd %1,(%2)\n\t"
12808 +
12809 +#ifdef CONFIG_PAX_REFCOUNT
12810 + "jno 0f\n"
12811 + "mov %1,(%2)\n"
12812 + "int $4\n0:\n"
12813 + _ASM_EXTABLE(0b, 0b)
12814 +#endif
12815 +
12816 /* tries to transition
12817 0xffff0001 -> 0x00000000 */
12818 " jz 1f\n"
12819 @@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12820 {
12821 asm volatile("# beginning __downgrade_write\n\t"
12822 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12823 +
12824 +#ifdef CONFIG_PAX_REFCOUNT
12825 + "jno 0f\n"
12826 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12827 + "int $4\n0:\n"
12828 + _ASM_EXTABLE(0b, 0b)
12829 +#endif
12830 +
12831 /*
12832 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12833 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12834 @@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12835 static inline void rwsem_atomic_add(rwsem_count_t delta,
12836 struct rw_semaphore *sem)
12837 {
12838 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12839 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12840 +
12841 +#ifdef CONFIG_PAX_REFCOUNT
12842 + "jno 0f\n"
12843 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
12844 + "int $4\n0:\n"
12845 + _ASM_EXTABLE(0b, 0b)
12846 +#endif
12847 +
12848 : "+m" (sem->count)
12849 : "er" (delta));
12850 }
12851 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
12852 {
12853 rwsem_count_t tmp = delta;
12854
12855 - asm volatile(LOCK_PREFIX "xadd %0,%1"
12856 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
12857 +
12858 +#ifdef CONFIG_PAX_REFCOUNT
12859 + "jno 0f\n"
12860 + "mov %0,%1\n"
12861 + "int $4\n0:\n"
12862 + _ASM_EXTABLE(0b, 0b)
12863 +#endif
12864 +
12865 : "+r" (tmp), "+m" (sem->count)
12866 : : "memory");
12867
12868 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12869 index 14e0ed8..7f7dd5e 100644
12870 --- a/arch/x86/include/asm/segment.h
12871 +++ b/arch/x86/include/asm/segment.h
12872 @@ -62,10 +62,15 @@
12873 * 26 - ESPFIX small SS
12874 * 27 - per-cpu [ offset to per-cpu data area ]
12875 * 28 - stack_canary-20 [ for stack protector ]
12876 - * 29 - unused
12877 - * 30 - unused
12878 + * 29 - PCI BIOS CS
12879 + * 30 - PCI BIOS DS
12880 * 31 - TSS for double fault handler
12881 */
12882 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12883 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12884 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12885 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12886 +
12887 #define GDT_ENTRY_TLS_MIN 6
12888 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12889
12890 @@ -77,6 +82,8 @@
12891
12892 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
12893
12894 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12895 +
12896 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
12897
12898 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
12899 @@ -88,7 +95,7 @@
12900 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
12901 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
12902
12903 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
12904 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
12905 #ifdef CONFIG_SMP
12906 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
12907 #else
12908 @@ -102,6 +109,12 @@
12909 #define __KERNEL_STACK_CANARY 0
12910 #endif
12911
12912 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
12913 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12914 +
12915 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
12916 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12917 +
12918 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12919
12920 /*
12921 @@ -139,7 +152,7 @@
12922 */
12923
12924 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12925 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12926 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12927
12928
12929 #else
12930 @@ -163,6 +176,8 @@
12931 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
12932 #define __USER32_DS __USER_DS
12933
12934 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12935 +
12936 #define GDT_ENTRY_TSS 8 /* needs two entries */
12937 #define GDT_ENTRY_LDT 10 /* needs two entries */
12938 #define GDT_ENTRY_TLS_MIN 12
12939 @@ -183,6 +198,7 @@
12940 #endif
12941
12942 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
12943 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
12944 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
12945 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
12946 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
12947 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12948 index 4c2f63c..5685db2 100644
12949 --- a/arch/x86/include/asm/smp.h
12950 +++ b/arch/x86/include/asm/smp.h
12951 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
12952 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
12953 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12954 DECLARE_PER_CPU(u16, cpu_llc_id);
12955 -DECLARE_PER_CPU(int, cpu_number);
12956 +DECLARE_PER_CPU(unsigned int, cpu_number);
12957
12958 static inline struct cpumask *cpu_sibling_mask(int cpu)
12959 {
12960 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
12961 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
12962
12963 /* Static state in head.S used to set up a CPU */
12964 -extern struct {
12965 - void *sp;
12966 - unsigned short ss;
12967 -} stack_start;
12968 +extern unsigned long stack_start; /* Initial stack pointer address */
12969
12970 struct smp_ops {
12971 void (*smp_prepare_boot_cpu)(void);
12972 @@ -60,7 +57,7 @@ struct smp_ops {
12973
12974 void (*send_call_func_ipi)(const struct cpumask *mask);
12975 void (*send_call_func_single_ipi)(int cpu);
12976 -};
12977 +} __no_const;
12978
12979 /* Globals due to paravirt */
12980 extern void set_cpu_sibling_map(int cpu);
12981 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12982 extern int safe_smp_processor_id(void);
12983
12984 #elif defined(CONFIG_X86_64_SMP)
12985 -#define raw_smp_processor_id() (percpu_read(cpu_number))
12986 -
12987 -#define stack_smp_processor_id() \
12988 -({ \
12989 - struct thread_info *ti; \
12990 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12991 - ti->cpu; \
12992 -})
12993 +#define raw_smp_processor_id() (percpu_read(cpu_number))
12994 +#define stack_smp_processor_id() raw_smp_processor_id()
12995 #define safe_smp_processor_id() smp_processor_id()
12996
12997 #endif
12998 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12999 index 4e77853..4359783 100644
13000 --- a/arch/x86/include/asm/spinlock.h
13001 +++ b/arch/x86/include/asm/spinlock.h
13002 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
13003 static inline void __raw_read_lock(raw_rwlock_t *rw)
13004 {
13005 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
13006 +
13007 +#ifdef CONFIG_PAX_REFCOUNT
13008 + "jno 0f\n"
13009 + LOCK_PREFIX " addl $1,(%0)\n"
13010 + "int $4\n0:\n"
13011 + _ASM_EXTABLE(0b, 0b)
13012 +#endif
13013 +
13014 "jns 1f\n"
13015 "call __read_lock_failed\n\t"
13016 "1:\n"
13017 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
13018 static inline void __raw_write_lock(raw_rwlock_t *rw)
13019 {
13020 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
13021 +
13022 +#ifdef CONFIG_PAX_REFCOUNT
13023 + "jno 0f\n"
13024 + LOCK_PREFIX " addl %1,(%0)\n"
13025 + "int $4\n0:\n"
13026 + _ASM_EXTABLE(0b, 0b)
13027 +#endif
13028 +
13029 "jz 1f\n"
13030 "call __write_lock_failed\n\t"
13031 "1:\n"
13032 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
13033
13034 static inline void __raw_read_unlock(raw_rwlock_t *rw)
13035 {
13036 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
13037 + asm volatile(LOCK_PREFIX "incl %0\n"
13038 +
13039 +#ifdef CONFIG_PAX_REFCOUNT
13040 + "jno 0f\n"
13041 + LOCK_PREFIX "decl %0\n"
13042 + "int $4\n0:\n"
13043 + _ASM_EXTABLE(0b, 0b)
13044 +#endif
13045 +
13046 + :"+m" (rw->lock) : : "memory");
13047 }
13048
13049 static inline void __raw_write_unlock(raw_rwlock_t *rw)
13050 {
13051 - asm volatile(LOCK_PREFIX "addl %1, %0"
13052 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
13053 +
13054 +#ifdef CONFIG_PAX_REFCOUNT
13055 + "jno 0f\n"
13056 + LOCK_PREFIX "subl %1, %0\n"
13057 + "int $4\n0:\n"
13058 + _ASM_EXTABLE(0b, 0b)
13059 +#endif
13060 +
13061 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
13062 }
13063
13064 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
13065 index 1575177..cb23f52 100644
13066 --- a/arch/x86/include/asm/stackprotector.h
13067 +++ b/arch/x86/include/asm/stackprotector.h
13068 @@ -48,7 +48,7 @@
13069 * head_32 for boot CPU and setup_per_cpu_areas() for others.
13070 */
13071 #define GDT_STACK_CANARY_INIT \
13072 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
13073 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
13074
13075 /*
13076 * Initialize the stackprotector canary value.
13077 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
13078
13079 static inline void load_stack_canary_segment(void)
13080 {
13081 -#ifdef CONFIG_X86_32
13082 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
13083 asm volatile ("mov %0, %%gs" : : "r" (0));
13084 #endif
13085 }
13086 diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
13087 index 1bb6e39..234246f 100644
13088 --- a/arch/x86/include/asm/syscalls.h
13089 +++ b/arch/x86/include/asm/syscalls.h
13090 @@ -24,7 +24,7 @@ int sys_fork(struct pt_regs *);
13091 int sys_vfork(struct pt_regs *);
13092
13093 /* kernel/ldt.c */
13094 -asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
13095 +asmlinkage int sys_modify_ldt(int, void __user *, unsigned long) __size_overflow(3);
13096
13097 /* kernel/signal.c */
13098 long sys_rt_sigreturn(struct pt_regs *);
13099 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
13100 index e0fbf29..858ef4a 100644
13101 --- a/arch/x86/include/asm/system.h
13102 +++ b/arch/x86/include/asm/system.h
13103 @@ -132,7 +132,7 @@ do { \
13104 "thread_return:\n\t" \
13105 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
13106 __switch_canary \
13107 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
13108 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
13109 "movq %%rax,%%rdi\n\t" \
13110 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
13111 "jnz ret_from_fork\n\t" \
13112 @@ -143,7 +143,7 @@ do { \
13113 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
13114 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
13115 [_tif_fork] "i" (_TIF_FORK), \
13116 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
13117 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
13118 [current_task] "m" (per_cpu_var(current_task)) \
13119 __switch_canary_iparam \
13120 : "memory", "cc" __EXTRA_CLOBBER)
13121 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
13122 {
13123 unsigned long __limit;
13124 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
13125 - return __limit + 1;
13126 + return __limit;
13127 }
13128
13129 static inline void native_clts(void)
13130 @@ -340,12 +340,12 @@ void enable_hlt(void);
13131
13132 void cpu_idle_wait(void);
13133
13134 -extern unsigned long arch_align_stack(unsigned long sp);
13135 +#define arch_align_stack(x) ((x) & ~0xfUL)
13136 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
13137
13138 void default_idle(void);
13139
13140 -void stop_this_cpu(void *dummy);
13141 +void stop_this_cpu(void *dummy) __noreturn;
13142
13143 /*
13144 * Force strict CPU ordering.
13145 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
13146 index 19c3ce4..8962535 100644
13147 --- a/arch/x86/include/asm/thread_info.h
13148 +++ b/arch/x86/include/asm/thread_info.h
13149 @@ -10,6 +10,7 @@
13150 #include <linux/compiler.h>
13151 #include <asm/page.h>
13152 #include <asm/types.h>
13153 +#include <asm/percpu.h>
13154
13155 /*
13156 * low level task data that entry.S needs immediate access to
13157 @@ -24,7 +25,6 @@ struct exec_domain;
13158 #include <asm/atomic.h>
13159
13160 struct thread_info {
13161 - struct task_struct *task; /* main task structure */
13162 struct exec_domain *exec_domain; /* execution domain */
13163 __u32 flags; /* low level flags */
13164 __u32 status; /* thread synchronous flags */
13165 @@ -34,18 +34,12 @@ struct thread_info {
13166 mm_segment_t addr_limit;
13167 struct restart_block restart_block;
13168 void __user *sysenter_return;
13169 -#ifdef CONFIG_X86_32
13170 - unsigned long previous_esp; /* ESP of the previous stack in
13171 - case of nested (IRQ) stacks
13172 - */
13173 - __u8 supervisor_stack[0];
13174 -#endif
13175 + unsigned long lowest_stack;
13176 int uaccess_err;
13177 };
13178
13179 -#define INIT_THREAD_INFO(tsk) \
13180 +#define INIT_THREAD_INFO \
13181 { \
13182 - .task = &tsk, \
13183 .exec_domain = &default_exec_domain, \
13184 .flags = 0, \
13185 .cpu = 0, \
13186 @@ -56,7 +50,7 @@ struct thread_info {
13187 }, \
13188 }
13189
13190 -#define init_thread_info (init_thread_union.thread_info)
13191 +#define init_thread_info (init_thread_union.stack)
13192 #define init_stack (init_thread_union.stack)
13193
13194 #else /* !__ASSEMBLY__ */
13195 @@ -163,45 +157,40 @@ struct thread_info {
13196 #define alloc_thread_info(tsk) \
13197 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
13198
13199 -#ifdef CONFIG_X86_32
13200 -
13201 -#define STACK_WARN (THREAD_SIZE/8)
13202 -/*
13203 - * macros/functions for gaining access to the thread information structure
13204 - *
13205 - * preempt_count needs to be 1 initially, until the scheduler is functional.
13206 - */
13207 -#ifndef __ASSEMBLY__
13208 -
13209 -
13210 -/* how to get the current stack pointer from C */
13211 -register unsigned long current_stack_pointer asm("esp") __used;
13212 -
13213 -/* how to get the thread information struct from C */
13214 -static inline struct thread_info *current_thread_info(void)
13215 -{
13216 - return (struct thread_info *)
13217 - (current_stack_pointer & ~(THREAD_SIZE - 1));
13218 -}
13219 -
13220 -#else /* !__ASSEMBLY__ */
13221 -
13222 +#ifdef __ASSEMBLY__
13223 /* how to get the thread information struct from ASM */
13224 #define GET_THREAD_INFO(reg) \
13225 - movl $-THREAD_SIZE, reg; \
13226 - andl %esp, reg
13227 + mov PER_CPU_VAR(current_tinfo), reg
13228
13229 /* use this one if reg already contains %esp */
13230 -#define GET_THREAD_INFO_WITH_ESP(reg) \
13231 - andl $-THREAD_SIZE, reg
13232 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
13233 +#else
13234 +/* how to get the thread information struct from C */
13235 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
13236 +
13237 +static __always_inline struct thread_info *current_thread_info(void)
13238 +{
13239 + return percpu_read_stable(current_tinfo);
13240 +}
13241 +#endif
13242 +
13243 +#ifdef CONFIG_X86_32
13244 +
13245 +#define STACK_WARN (THREAD_SIZE/8)
13246 +/*
13247 + * macros/functions for gaining access to the thread information structure
13248 + *
13249 + * preempt_count needs to be 1 initially, until the scheduler is functional.
13250 + */
13251 +#ifndef __ASSEMBLY__
13252 +
13253 +/* how to get the current stack pointer from C */
13254 +register unsigned long current_stack_pointer asm("esp") __used;
13255
13256 #endif
13257
13258 #else /* X86_32 */
13259
13260 -#include <asm/percpu.h>
13261 -#define KERNEL_STACK_OFFSET (5*8)
13262 -
13263 /*
13264 * macros/functions for gaining access to the thread information structure
13265 * preempt_count needs to be 1 initially, until the scheduler is functional.
13266 @@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
13267 #ifndef __ASSEMBLY__
13268 DECLARE_PER_CPU(unsigned long, kernel_stack);
13269
13270 -static inline struct thread_info *current_thread_info(void)
13271 -{
13272 - struct thread_info *ti;
13273 - ti = (void *)(percpu_read_stable(kernel_stack) +
13274 - KERNEL_STACK_OFFSET - THREAD_SIZE);
13275 - return ti;
13276 -}
13277 -
13278 -#else /* !__ASSEMBLY__ */
13279 -
13280 -/* how to get the thread information struct from ASM */
13281 -#define GET_THREAD_INFO(reg) \
13282 - movq PER_CPU_VAR(kernel_stack),reg ; \
13283 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
13284 -
13285 +/* how to get the current stack pointer from C */
13286 +register unsigned long current_stack_pointer asm("rsp") __used;
13287 #endif
13288
13289 #endif /* !X86_32 */
13290 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
13291 extern void free_thread_info(struct thread_info *ti);
13292 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
13293 #define arch_task_cache_init arch_task_cache_init
13294 +
13295 +#define __HAVE_THREAD_FUNCTIONS
13296 +#define task_thread_info(task) (&(task)->tinfo)
13297 +#define task_stack_page(task) ((task)->stack)
13298 +#define setup_thread_stack(p, org) do {} while (0)
13299 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
13300 +
13301 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
13302 +extern struct task_struct *alloc_task_struct(void);
13303 +extern void free_task_struct(struct task_struct *);
13304 +
13305 #endif
13306 #endif /* _ASM_X86_THREAD_INFO_H */
13307 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
13308 index 61c5874..8a046e9 100644
13309 --- a/arch/x86/include/asm/uaccess.h
13310 +++ b/arch/x86/include/asm/uaccess.h
13311 @@ -8,12 +8,15 @@
13312 #include <linux/thread_info.h>
13313 #include <linux/prefetch.h>
13314 #include <linux/string.h>
13315 +#include <linux/sched.h>
13316 #include <asm/asm.h>
13317 #include <asm/page.h>
13318
13319 #define VERIFY_READ 0
13320 #define VERIFY_WRITE 1
13321
13322 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
13323 +
13324 /*
13325 * The fs value determines whether argument validity checking should be
13326 * performed or not. If get_fs() == USER_DS, checking is performed, with
13327 @@ -29,7 +32,12 @@
13328
13329 #define get_ds() (KERNEL_DS)
13330 #define get_fs() (current_thread_info()->addr_limit)
13331 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13332 +void __set_fs(mm_segment_t x);
13333 +void set_fs(mm_segment_t x);
13334 +#else
13335 #define set_fs(x) (current_thread_info()->addr_limit = (x))
13336 +#endif
13337
13338 #define segment_eq(a, b) ((a).seg == (b).seg)
13339
13340 @@ -77,7 +85,33 @@
13341 * checks that the pointer is in the user space range - after calling
13342 * this function, memory access functions may still return -EFAULT.
13343 */
13344 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
13345 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
13346 +#define access_ok(type, addr, size) \
13347 +({ \
13348 + long __size = size; \
13349 + unsigned long __addr = (unsigned long)addr; \
13350 + unsigned long __addr_ao = __addr & PAGE_MASK; \
13351 + unsigned long __end_ao = __addr + __size - 1; \
13352 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
13353 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
13354 + while(__addr_ao <= __end_ao) { \
13355 + char __c_ao; \
13356 + __addr_ao += PAGE_SIZE; \
13357 + if (__size > PAGE_SIZE) \
13358 + cond_resched(); \
13359 + if (__get_user(__c_ao, (char __user *)__addr)) \
13360 + break; \
13361 + if (type != VERIFY_WRITE) { \
13362 + __addr = __addr_ao; \
13363 + continue; \
13364 + } \
13365 + if (__put_user(__c_ao, (char __user *)__addr)) \
13366 + break; \
13367 + __addr = __addr_ao; \
13368 + } \
13369 + } \
13370 + __ret_ao; \
13371 +})
13372
13373 /*
13374 * The exception table consists of pairs of addresses: the first is the
13375 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
13376 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
13377 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
13378
13379 -
13380 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13381 +#define __copyuser_seg "gs;"
13382 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
13383 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
13384 +#else
13385 +#define __copyuser_seg
13386 +#define __COPYUSER_SET_ES
13387 +#define __COPYUSER_RESTORE_ES
13388 +#endif
13389
13390 #ifdef CONFIG_X86_32
13391 #define __put_user_asm_u64(x, addr, err, errret) \
13392 - asm volatile("1: movl %%eax,0(%2)\n" \
13393 - "2: movl %%edx,4(%2)\n" \
13394 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
13395 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
13396 "3:\n" \
13397 ".section .fixup,\"ax\"\n" \
13398 "4: movl %3,%0\n" \
13399 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
13400 : "A" (x), "r" (addr), "i" (errret), "0" (err))
13401
13402 #define __put_user_asm_ex_u64(x, addr) \
13403 - asm volatile("1: movl %%eax,0(%1)\n" \
13404 - "2: movl %%edx,4(%1)\n" \
13405 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
13406 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
13407 "3:\n" \
13408 _ASM_EXTABLE(1b, 2b - 1b) \
13409 _ASM_EXTABLE(2b, 3b - 2b) \
13410 @@ -253,7 +295,7 @@ extern void __put_user_8(void);
13411 __typeof__(*(ptr)) __pu_val; \
13412 __chk_user_ptr(ptr); \
13413 might_fault(); \
13414 - __pu_val = x; \
13415 + __pu_val = (x); \
13416 switch (sizeof(*(ptr))) { \
13417 case 1: \
13418 __put_user_x(1, __pu_val, ptr, __ret_pu); \
13419 @@ -374,7 +416,7 @@ do { \
13420 } while (0)
13421
13422 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
13423 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
13424 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
13425 "2:\n" \
13426 ".section .fixup,\"ax\"\n" \
13427 "3: mov %3,%0\n" \
13428 @@ -382,7 +424,7 @@ do { \
13429 " jmp 2b\n" \
13430 ".previous\n" \
13431 _ASM_EXTABLE(1b, 3b) \
13432 - : "=r" (err), ltype(x) \
13433 + : "=r" (err), ltype (x) \
13434 : "m" (__m(addr)), "i" (errret), "0" (err))
13435
13436 #define __get_user_size_ex(x, ptr, size) \
13437 @@ -407,7 +449,7 @@ do { \
13438 } while (0)
13439
13440 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
13441 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
13442 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
13443 "2:\n" \
13444 _ASM_EXTABLE(1b, 2b - 1b) \
13445 : ltype(x) : "m" (__m(addr)))
13446 @@ -424,13 +466,24 @@ do { \
13447 int __gu_err; \
13448 unsigned long __gu_val; \
13449 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
13450 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
13451 + (x) = (__typeof__(*(ptr)))__gu_val; \
13452 __gu_err; \
13453 })
13454
13455 /* FIXME: this hack is definitely wrong -AK */
13456 struct __large_struct { unsigned long buf[100]; };
13457 -#define __m(x) (*(struct __large_struct __user *)(x))
13458 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13459 +#define ____m(x) \
13460 +({ \
13461 + unsigned long ____x = (unsigned long)(x); \
13462 + if (____x < PAX_USER_SHADOW_BASE) \
13463 + ____x += PAX_USER_SHADOW_BASE; \
13464 + (void __user *)____x; \
13465 +})
13466 +#else
13467 +#define ____m(x) (x)
13468 +#endif
13469 +#define __m(x) (*(struct __large_struct __user *)____m(x))
13470
13471 /*
13472 * Tell gcc we read from memory instead of writing: this is because
13473 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
13474 * aliasing issues.
13475 */
13476 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
13477 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
13478 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
13479 "2:\n" \
13480 ".section .fixup,\"ax\"\n" \
13481 "3: mov %3,%0\n" \
13482 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
13483 ".previous\n" \
13484 _ASM_EXTABLE(1b, 3b) \
13485 : "=r"(err) \
13486 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
13487 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
13488
13489 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
13490 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
13491 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
13492 "2:\n" \
13493 _ASM_EXTABLE(1b, 2b - 1b) \
13494 : : ltype(x), "m" (__m(addr)))
13495 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
13496 * On error, the variable @x is set to zero.
13497 */
13498
13499 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13500 +#define __get_user(x, ptr) get_user((x), (ptr))
13501 +#else
13502 #define __get_user(x, ptr) \
13503 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
13504 +#endif
13505
13506 /**
13507 * __put_user: - Write a simple value into user space, with less checking.
13508 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
13509 * Returns zero on success, or -EFAULT on error.
13510 */
13511
13512 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13513 +#define __put_user(x, ptr) put_user((x), (ptr))
13514 +#else
13515 #define __put_user(x, ptr) \
13516 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
13517 +#endif
13518
13519 #define __get_user_unaligned __get_user
13520 #define __put_user_unaligned __put_user
13521 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
13522 #define get_user_ex(x, ptr) do { \
13523 unsigned long __gue_val; \
13524 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
13525 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
13526 + (x) = (__typeof__(*(ptr)))__gue_val; \
13527 } while (0)
13528
13529 #ifdef CONFIG_X86_WP_WORKS_OK
13530 @@ -567,6 +628,7 @@ extern struct movsl_mask {
13531
13532 #define ARCH_HAS_NOCACHE_UACCESS 1
13533
13534 +#define ARCH_HAS_SORT_EXTABLE
13535 #ifdef CONFIG_X86_32
13536 # include "uaccess_32.h"
13537 #else
13538 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
13539 index 632fb44..2a195ea 100644
13540 --- a/arch/x86/include/asm/uaccess_32.h
13541 +++ b/arch/x86/include/asm/uaccess_32.h
13542 @@ -12,15 +12,15 @@
13543 #include <asm/page.h>
13544
13545 unsigned long __must_check __copy_to_user_ll
13546 - (void __user *to, const void *from, unsigned long n);
13547 + (void __user *to, const void *from, unsigned long n) __size_overflow(3);
13548 unsigned long __must_check __copy_from_user_ll
13549 - (void *to, const void __user *from, unsigned long n);
13550 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13551 unsigned long __must_check __copy_from_user_ll_nozero
13552 - (void *to, const void __user *from, unsigned long n);
13553 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13554 unsigned long __must_check __copy_from_user_ll_nocache
13555 - (void *to, const void __user *from, unsigned long n);
13556 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13557 unsigned long __must_check __copy_from_user_ll_nocache_nozero
13558 - (void *to, const void __user *from, unsigned long n);
13559 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13560
13561 /**
13562 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
13563 @@ -42,8 +42,15 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
13564 */
13565
13566 static __always_inline unsigned long __must_check
13567 +__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) __size_overflow(3);
13568 +static __always_inline unsigned long __must_check
13569 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13570 {
13571 + pax_track_stack();
13572 +
13573 + if ((long)n < 0)
13574 + return n;
13575 +
13576 if (__builtin_constant_p(n)) {
13577 unsigned long ret;
13578
13579 @@ -62,6 +69,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13580 return ret;
13581 }
13582 }
13583 + if (!__builtin_constant_p(n))
13584 + check_object_size(from, n, true);
13585 return __copy_to_user_ll(to, from, n);
13586 }
13587
13588 @@ -80,15 +89,23 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13589 * On success, this will be zero.
13590 */
13591 static __always_inline unsigned long __must_check
13592 +__copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
13593 +static __always_inline unsigned long __must_check
13594 __copy_to_user(void __user *to, const void *from, unsigned long n)
13595 {
13596 might_fault();
13597 +
13598 return __copy_to_user_inatomic(to, from, n);
13599 }
13600
13601 static __always_inline unsigned long
13602 +__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) __size_overflow(3);
13603 +static __always_inline unsigned long
13604 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
13605 {
13606 + if ((long)n < 0)
13607 + return n;
13608 +
13609 /* Avoid zeroing the tail if the copy fails..
13610 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
13611 * but as the zeroing behaviour is only significant when n is not
13612 @@ -135,9 +152,17 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
13613 * for explanation of why this is needed.
13614 */
13615 static __always_inline unsigned long
13616 +__copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
13617 +static __always_inline unsigned long
13618 __copy_from_user(void *to, const void __user *from, unsigned long n)
13619 {
13620 might_fault();
13621 +
13622 + pax_track_stack();
13623 +
13624 + if ((long)n < 0)
13625 + return n;
13626 +
13627 if (__builtin_constant_p(n)) {
13628 unsigned long ret;
13629
13630 @@ -153,13 +178,21 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
13631 return ret;
13632 }
13633 }
13634 + if (!__builtin_constant_p(n))
13635 + check_object_size(to, n, false);
13636 return __copy_from_user_ll(to, from, n);
13637 }
13638
13639 static __always_inline unsigned long __copy_from_user_nocache(void *to,
13640 + const void __user *from, unsigned long n) __size_overflow(3);
13641 +static __always_inline unsigned long __copy_from_user_nocache(void *to,
13642 const void __user *from, unsigned long n)
13643 {
13644 might_fault();
13645 +
13646 + if ((long)n < 0)
13647 + return n;
13648 +
13649 if (__builtin_constant_p(n)) {
13650 unsigned long ret;
13651
13652 @@ -180,20 +213,75 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
13653
13654 static __always_inline unsigned long
13655 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
13656 + unsigned long n) __size_overflow(3);
13657 +static __always_inline unsigned long
13658 +__copy_from_user_inatomic_nocache(void *to, const void __user *from,
13659 unsigned long n)
13660 {
13661 - return __copy_from_user_ll_nocache_nozero(to, from, n);
13662 + if ((long)n < 0)
13663 + return n;
13664 +
13665 + return __copy_from_user_ll_nocache_nozero(to, from, n);
13666 +}
13667 +
13668 +/**
13669 + * copy_to_user: - Copy a block of data into user space.
13670 + * @to: Destination address, in user space.
13671 + * @from: Source address, in kernel space.
13672 + * @n: Number of bytes to copy.
13673 + *
13674 + * Context: User context only. This function may sleep.
13675 + *
13676 + * Copy data from kernel space to user space.
13677 + *
13678 + * Returns number of bytes that could not be copied.
13679 + * On success, this will be zero.
13680 + */
13681 +static __always_inline unsigned long __must_check
13682 +copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
13683 +static __always_inline unsigned long __must_check
13684 +copy_to_user(void __user *to, const void *from, unsigned long n)
13685 +{
13686 + if (access_ok(VERIFY_WRITE, to, n))
13687 + n = __copy_to_user(to, from, n);
13688 + return n;
13689 +}
13690 +
13691 +/**
13692 + * copy_from_user: - Copy a block of data from user space.
13693 + * @to: Destination address, in kernel space.
13694 + * @from: Source address, in user space.
13695 + * @n: Number of bytes to copy.
13696 + *
13697 + * Context: User context only. This function may sleep.
13698 + *
13699 + * Copy data from user space to kernel space.
13700 + *
13701 + * Returns number of bytes that could not be copied.
13702 + * On success, this will be zero.
13703 + *
13704 + * If some data could not be copied, this function will pad the copied
13705 + * data to the requested size using zero bytes.
13706 + */
13707 +static __always_inline unsigned long __must_check
13708 +copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
13709 +static __always_inline unsigned long __must_check
13710 +copy_from_user(void *to, const void __user *from, unsigned long n)
13711 +{
13712 + if (access_ok(VERIFY_READ, from, n))
13713 + n = __copy_from_user(to, from, n);
13714 + else if ((long)n > 0) {
13715 + if (!__builtin_constant_p(n))
13716 + check_object_size(to, n, false);
13717 + memset(to, 0, n);
13718 + }
13719 + return n;
13720 }
13721
13722 -unsigned long __must_check copy_to_user(void __user *to,
13723 - const void *from, unsigned long n);
13724 -unsigned long __must_check copy_from_user(void *to,
13725 - const void __user *from,
13726 - unsigned long n);
13727 long __must_check strncpy_from_user(char *dst, const char __user *src,
13728 - long count);
13729 + unsigned long count) __size_overflow(3);
13730 long __must_check __strncpy_from_user(char *dst,
13731 - const char __user *src, long count);
13732 + const char __user *src, unsigned long count) __size_overflow(3);
13733
13734 /**
13735 * strlen_user: - Get the size of a string in user space.
13736 @@ -211,8 +299,8 @@ long __must_check __strncpy_from_user(char *dst,
13737 */
13738 #define strlen_user(str) strnlen_user(str, LONG_MAX)
13739
13740 -long strnlen_user(const char __user *str, long n);
13741 -unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13742 -unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13743 +long strnlen_user(const char __user *str, unsigned long n);
13744 +unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13745 +unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13746
13747 #endif /* _ASM_X86_UACCESS_32_H */
13748 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13749 index db24b21..d0d2413 100644
13750 --- a/arch/x86/include/asm/uaccess_64.h
13751 +++ b/arch/x86/include/asm/uaccess_64.h
13752 @@ -9,6 +9,9 @@
13753 #include <linux/prefetch.h>
13754 #include <linux/lockdep.h>
13755 #include <asm/page.h>
13756 +#include <asm/pgtable.h>
13757 +
13758 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
13759
13760 /*
13761 * Copy To/From Userspace
13762 @@ -16,116 +19,215 @@
13763
13764 /* Handles exceptions in both to and from, but doesn't do access_ok */
13765 __must_check unsigned long
13766 -copy_user_generic(void *to, const void *from, unsigned len);
13767 +copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
13768
13769 __must_check unsigned long
13770 -copy_to_user(void __user *to, const void *from, unsigned len);
13771 -__must_check unsigned long
13772 -copy_from_user(void *to, const void __user *from, unsigned len);
13773 -__must_check unsigned long
13774 -copy_in_user(void __user *to, const void __user *from, unsigned len);
13775 +copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13776
13777 static __always_inline __must_check
13778 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
13779 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
13780 +static __always_inline __must_check
13781 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13782 {
13783 - int ret = 0;
13784 + unsigned ret = 0;
13785
13786 might_fault();
13787 - if (!__builtin_constant_p(size))
13788 - return copy_user_generic(dst, (__force void *)src, size);
13789 +
13790 + if (size > INT_MAX)
13791 + return size;
13792 +
13793 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13794 + if (!__access_ok(VERIFY_READ, src, size))
13795 + return size;
13796 +#endif
13797 +
13798 + if (!__builtin_constant_p(size)) {
13799 + check_object_size(dst, size, false);
13800 +
13801 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13802 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13803 + src += PAX_USER_SHADOW_BASE;
13804 +#endif
13805 +
13806 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13807 + }
13808 switch (size) {
13809 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13810 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13811 ret, "b", "b", "=q", 1);
13812 return ret;
13813 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13814 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13815 ret, "w", "w", "=r", 2);
13816 return ret;
13817 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13818 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13819 ret, "l", "k", "=r", 4);
13820 return ret;
13821 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13822 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13823 ret, "q", "", "=r", 8);
13824 return ret;
13825 case 10:
13826 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13827 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13828 ret, "q", "", "=r", 10);
13829 if (unlikely(ret))
13830 return ret;
13831 __get_user_asm(*(u16 *)(8 + (char *)dst),
13832 - (u16 __user *)(8 + (char __user *)src),
13833 + (const u16 __user *)(8 + (const char __user *)src),
13834 ret, "w", "w", "=r", 2);
13835 return ret;
13836 case 16:
13837 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13838 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13839 ret, "q", "", "=r", 16);
13840 if (unlikely(ret))
13841 return ret;
13842 __get_user_asm(*(u64 *)(8 + (char *)dst),
13843 - (u64 __user *)(8 + (char __user *)src),
13844 + (const u64 __user *)(8 + (const char __user *)src),
13845 ret, "q", "", "=r", 8);
13846 return ret;
13847 default:
13848 - return copy_user_generic(dst, (__force void *)src, size);
13849 +
13850 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13851 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13852 + src += PAX_USER_SHADOW_BASE;
13853 +#endif
13854 +
13855 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13856 }
13857 }
13858
13859 static __always_inline __must_check
13860 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
13861 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
13862 +static __always_inline __must_check
13863 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13864 {
13865 - int ret = 0;
13866 + unsigned ret = 0;
13867
13868 might_fault();
13869 - if (!__builtin_constant_p(size))
13870 - return copy_user_generic((__force void *)dst, src, size);
13871 +
13872 + pax_track_stack();
13873 +
13874 + if (size > INT_MAX)
13875 + return size;
13876 +
13877 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13878 + if (!__access_ok(VERIFY_WRITE, dst, size))
13879 + return size;
13880 +#endif
13881 +
13882 + if (!__builtin_constant_p(size)) {
13883 + check_object_size(src, size, true);
13884 +
13885 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13886 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13887 + dst += PAX_USER_SHADOW_BASE;
13888 +#endif
13889 +
13890 + return copy_user_generic((__force_kernel void *)dst, src, size);
13891 + }
13892 switch (size) {
13893 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13894 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13895 ret, "b", "b", "iq", 1);
13896 return ret;
13897 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13898 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13899 ret, "w", "w", "ir", 2);
13900 return ret;
13901 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13902 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13903 ret, "l", "k", "ir", 4);
13904 return ret;
13905 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13906 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13907 ret, "q", "", "er", 8);
13908 return ret;
13909 case 10:
13910 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13911 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13912 ret, "q", "", "er", 10);
13913 if (unlikely(ret))
13914 return ret;
13915 asm("":::"memory");
13916 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13917 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13918 ret, "w", "w", "ir", 2);
13919 return ret;
13920 case 16:
13921 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13922 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13923 ret, "q", "", "er", 16);
13924 if (unlikely(ret))
13925 return ret;
13926 asm("":::"memory");
13927 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13928 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13929 ret, "q", "", "er", 8);
13930 return ret;
13931 default:
13932 - return copy_user_generic((__force void *)dst, src, size);
13933 +
13934 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13935 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13936 + dst += PAX_USER_SHADOW_BASE;
13937 +#endif
13938 +
13939 + return copy_user_generic((__force_kernel void *)dst, src, size);
13940 }
13941 }
13942
13943 static __always_inline __must_check
13944 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13945 +unsigned long copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13946 +static __always_inline __must_check
13947 +unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
13948 {
13949 - int ret = 0;
13950 + if (access_ok(VERIFY_WRITE, to, len))
13951 + len = __copy_to_user(to, from, len);
13952 + return len;
13953 +}
13954
13955 +static __always_inline __must_check
13956 +unsigned long copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13957 +static __always_inline __must_check
13958 +unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
13959 +{
13960 might_fault();
13961 - if (!__builtin_constant_p(size))
13962 - return copy_user_generic((__force void *)dst,
13963 - (__force void *)src, size);
13964 +
13965 + if (access_ok(VERIFY_READ, from, len))
13966 + len = __copy_from_user(to, from, len);
13967 + else if (len < INT_MAX) {
13968 + if (!__builtin_constant_p(len))
13969 + check_object_size(to, len, false);
13970 + memset(to, 0, len);
13971 + }
13972 + return len;
13973 +}
13974 +
13975 +static __always_inline __must_check
13976 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size) __size_overflow(3);
13977 +static __always_inline __must_check
13978 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13979 +{
13980 + unsigned ret = 0;
13981 +
13982 + might_fault();
13983 +
13984 + pax_track_stack();
13985 +
13986 + if (size > INT_MAX)
13987 + return size;
13988 +
13989 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13990 + if (!__access_ok(VERIFY_READ, src, size))
13991 + return size;
13992 + if (!__access_ok(VERIFY_WRITE, dst, size))
13993 + return size;
13994 +#endif
13995 +
13996 + if (!__builtin_constant_p(size)) {
13997 +
13998 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13999 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
14000 + src += PAX_USER_SHADOW_BASE;
14001 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
14002 + dst += PAX_USER_SHADOW_BASE;
14003 +#endif
14004 +
14005 + return copy_user_generic((__force_kernel void *)dst,
14006 + (__force_kernel const void *)src, size);
14007 + }
14008 switch (size) {
14009 case 1: {
14010 u8 tmp;
14011 - __get_user_asm(tmp, (u8 __user *)src,
14012 + __get_user_asm(tmp, (const u8 __user *)src,
14013 ret, "b", "b", "=q", 1);
14014 if (likely(!ret))
14015 __put_user_asm(tmp, (u8 __user *)dst,
14016 @@ -134,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14017 }
14018 case 2: {
14019 u16 tmp;
14020 - __get_user_asm(tmp, (u16 __user *)src,
14021 + __get_user_asm(tmp, (const u16 __user *)src,
14022 ret, "w", "w", "=r", 2);
14023 if (likely(!ret))
14024 __put_user_asm(tmp, (u16 __user *)dst,
14025 @@ -144,7 +246,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14026
14027 case 4: {
14028 u32 tmp;
14029 - __get_user_asm(tmp, (u32 __user *)src,
14030 + __get_user_asm(tmp, (const u32 __user *)src,
14031 ret, "l", "k", "=r", 4);
14032 if (likely(!ret))
14033 __put_user_asm(tmp, (u32 __user *)dst,
14034 @@ -153,7 +255,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14035 }
14036 case 8: {
14037 u64 tmp;
14038 - __get_user_asm(tmp, (u64 __user *)src,
14039 + __get_user_asm(tmp, (const u64 __user *)src,
14040 ret, "q", "", "=r", 8);
14041 if (likely(!ret))
14042 __put_user_asm(tmp, (u64 __user *)dst,
14043 @@ -161,48 +263,105 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14044 return ret;
14045 }
14046 default:
14047 - return copy_user_generic((__force void *)dst,
14048 - (__force void *)src, size);
14049 +
14050 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14051 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
14052 + src += PAX_USER_SHADOW_BASE;
14053 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
14054 + dst += PAX_USER_SHADOW_BASE;
14055 +#endif
14056 +
14057 + return copy_user_generic((__force_kernel void *)dst,
14058 + (__force_kernel const void *)src, size);
14059 }
14060 }
14061
14062 __must_check long
14063 -strncpy_from_user(char *dst, const char __user *src, long count);
14064 +strncpy_from_user(char *dst, const char __user *src, unsigned long count) __size_overflow(3);
14065 __must_check long
14066 -__strncpy_from_user(char *dst, const char __user *src, long count);
14067 -__must_check long strnlen_user(const char __user *str, long n);
14068 -__must_check long __strnlen_user(const char __user *str, long n);
14069 +__strncpy_from_user(char *dst, const char __user *src, unsigned long count) __size_overflow(3);
14070 +__must_check long strnlen_user(const char __user *str, unsigned long n) __size_overflow(2);
14071 +__must_check long __strnlen_user(const char __user *str, unsigned long n) __size_overflow(2);
14072 __must_check long strlen_user(const char __user *str);
14073 -__must_check unsigned long clear_user(void __user *mem, unsigned long len);
14074 -__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
14075 +__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
14076 +__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
14077
14078 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
14079 - unsigned size);
14080 +static __must_check __always_inline unsigned long
14081 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
14082 +static __must_check __always_inline unsigned long
14083 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
14084 +{
14085 + pax_track_stack();
14086 +
14087 + if (size > INT_MAX)
14088 + return size;
14089 +
14090 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14091 + if (!__access_ok(VERIFY_READ, src, size))
14092 + return size;
14093
14094 -static __must_check __always_inline int
14095 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
14096 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
14097 + src += PAX_USER_SHADOW_BASE;
14098 +#endif
14099 +
14100 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
14101 +}
14102 +
14103 +static __must_check __always_inline unsigned long
14104 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
14105 +static __must_check __always_inline unsigned long
14106 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
14107 {
14108 - return copy_user_generic((__force void *)dst, src, size);
14109 + if (size > INT_MAX)
14110 + return size;
14111 +
14112 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14113 + if (!__access_ok(VERIFY_WRITE, dst, size))
14114 + return size;
14115 +
14116 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
14117 + dst += PAX_USER_SHADOW_BASE;
14118 +#endif
14119 +
14120 + return copy_user_generic((__force_kernel void *)dst, src, size);
14121 }
14122
14123 -extern long __copy_user_nocache(void *dst, const void __user *src,
14124 - unsigned size, int zerorest);
14125 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
14126 + unsigned long size, int zerorest) __size_overflow(3);
14127
14128 -static inline int
14129 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
14130 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
14131 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
14132 {
14133 might_sleep();
14134 +
14135 + if (size > INT_MAX)
14136 + return size;
14137 +
14138 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14139 + if (!__access_ok(VERIFY_READ, src, size))
14140 + return size;
14141 +#endif
14142 +
14143 return __copy_user_nocache(dst, src, size, 1);
14144 }
14145
14146 -static inline int
14147 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14148 - unsigned size)
14149 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14150 + unsigned long size) __size_overflow(3);
14151 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14152 + unsigned long size)
14153 {
14154 + if (size > INT_MAX)
14155 + return size;
14156 +
14157 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14158 + if (!__access_ok(VERIFY_READ, src, size))
14159 + return size;
14160 +#endif
14161 +
14162 return __copy_user_nocache(dst, src, size, 0);
14163 }
14164
14165 -unsigned long
14166 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
14167 +extern unsigned long
14168 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
14169
14170 #endif /* _ASM_X86_UACCESS_64_H */
14171 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
14172 index 9064052..786cfbc 100644
14173 --- a/arch/x86/include/asm/vdso.h
14174 +++ b/arch/x86/include/asm/vdso.h
14175 @@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
14176 #define VDSO32_SYMBOL(base, name) \
14177 ({ \
14178 extern const char VDSO32_##name[]; \
14179 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
14180 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
14181 })
14182 #endif
14183
14184 diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
14185 index 3d61e20..9507180 100644
14186 --- a/arch/x86/include/asm/vgtod.h
14187 +++ b/arch/x86/include/asm/vgtod.h
14188 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
14189 int sysctl_enabled;
14190 struct timezone sys_tz;
14191 struct { /* extract of a clocksource struct */
14192 + char name[8];
14193 cycle_t (*vread)(void);
14194 cycle_t cycle_last;
14195 cycle_t mask;
14196 diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
14197 index 61e08c0..b0da582 100644
14198 --- a/arch/x86/include/asm/vmi.h
14199 +++ b/arch/x86/include/asm/vmi.h
14200 @@ -191,6 +191,7 @@ struct vrom_header {
14201 u8 reserved[96]; /* Reserved for headers */
14202 char vmi_init[8]; /* VMI_Init jump point */
14203 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
14204 + char rom_data[8048]; /* rest of the option ROM */
14205 } __attribute__((packed));
14206
14207 struct pnp_header {
14208 diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
14209 index c6e0bee..fcb9f74 100644
14210 --- a/arch/x86/include/asm/vmi_time.h
14211 +++ b/arch/x86/include/asm/vmi_time.h
14212 @@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
14213 int (*wallclock_updated)(void);
14214 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
14215 void (*cancel_alarm)(u32 flags);
14216 -} vmi_timer_ops;
14217 +} __no_const vmi_timer_ops;
14218
14219 /* Prototypes */
14220 extern void __init vmi_time_init(void);
14221 diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
14222 index d0983d2..1f7c9e9 100644
14223 --- a/arch/x86/include/asm/vsyscall.h
14224 +++ b/arch/x86/include/asm/vsyscall.h
14225 @@ -15,9 +15,10 @@ enum vsyscall_num {
14226
14227 #ifdef __KERNEL__
14228 #include <linux/seqlock.h>
14229 +#include <linux/getcpu.h>
14230 +#include <linux/time.h>
14231
14232 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
14233 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
14234
14235 /* Definitions for CONFIG_GENERIC_TIME definitions */
14236 #define __section_vsyscall_gtod_data __attribute__ \
14237 @@ -31,7 +32,6 @@ enum vsyscall_num {
14238 #define VGETCPU_LSL 2
14239
14240 extern int __vgetcpu_mode;
14241 -extern volatile unsigned long __jiffies;
14242
14243 /* kernel space (writeable) */
14244 extern int vgetcpu_mode;
14245 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
14246
14247 extern void map_vsyscall(void);
14248
14249 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
14250 +extern time_t vtime(time_t *t);
14251 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
14252 #endif /* __KERNEL__ */
14253
14254 #endif /* _ASM_X86_VSYSCALL_H */
14255 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
14256 index 2c756fd..3377e37 100644
14257 --- a/arch/x86/include/asm/x86_init.h
14258 +++ b/arch/x86/include/asm/x86_init.h
14259 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
14260 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
14261 void (*find_smp_config)(unsigned int reserve);
14262 void (*get_smp_config)(unsigned int early);
14263 -};
14264 +} __no_const;
14265
14266 /**
14267 * struct x86_init_resources - platform specific resource related ops
14268 @@ -42,7 +42,7 @@ struct x86_init_resources {
14269 void (*probe_roms)(void);
14270 void (*reserve_resources)(void);
14271 char *(*memory_setup)(void);
14272 -};
14273 +} __no_const;
14274
14275 /**
14276 * struct x86_init_irqs - platform specific interrupt setup
14277 @@ -55,7 +55,7 @@ struct x86_init_irqs {
14278 void (*pre_vector_init)(void);
14279 void (*intr_init)(void);
14280 void (*trap_init)(void);
14281 -};
14282 +} __no_const;
14283
14284 /**
14285 * struct x86_init_oem - oem platform specific customizing functions
14286 @@ -65,7 +65,7 @@ struct x86_init_irqs {
14287 struct x86_init_oem {
14288 void (*arch_setup)(void);
14289 void (*banner)(void);
14290 -};
14291 +} __no_const;
14292
14293 /**
14294 * struct x86_init_paging - platform specific paging functions
14295 @@ -75,7 +75,7 @@ struct x86_init_oem {
14296 struct x86_init_paging {
14297 void (*pagetable_setup_start)(pgd_t *base);
14298 void (*pagetable_setup_done)(pgd_t *base);
14299 -};
14300 +} __no_const;
14301
14302 /**
14303 * struct x86_init_timers - platform specific timer setup
14304 @@ -88,7 +88,7 @@ struct x86_init_timers {
14305 void (*setup_percpu_clockev)(void);
14306 void (*tsc_pre_init)(void);
14307 void (*timer_init)(void);
14308 -};
14309 +} __no_const;
14310
14311 /**
14312 * struct x86_init_ops - functions for platform specific setup
14313 @@ -101,7 +101,7 @@ struct x86_init_ops {
14314 struct x86_init_oem oem;
14315 struct x86_init_paging paging;
14316 struct x86_init_timers timers;
14317 -};
14318 +} __no_const;
14319
14320 /**
14321 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
14322 @@ -109,7 +109,7 @@ struct x86_init_ops {
14323 */
14324 struct x86_cpuinit_ops {
14325 void (*setup_percpu_clockev)(void);
14326 -};
14327 +} __no_const;
14328
14329 /**
14330 * struct x86_platform_ops - platform specific runtime functions
14331 @@ -121,7 +121,7 @@ struct x86_platform_ops {
14332 unsigned long (*calibrate_tsc)(void);
14333 unsigned long (*get_wallclock)(void);
14334 int (*set_wallclock)(unsigned long nowtime);
14335 -};
14336 +} __no_const;
14337
14338 extern struct x86_init_ops x86_init;
14339 extern struct x86_cpuinit_ops x86_cpuinit;
14340 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
14341 index 727acc1..554f3eb 100644
14342 --- a/arch/x86/include/asm/xsave.h
14343 +++ b/arch/x86/include/asm/xsave.h
14344 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
14345 static inline int xsave_user(struct xsave_struct __user *buf)
14346 {
14347 int err;
14348 +
14349 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14350 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
14351 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
14352 +#endif
14353 +
14354 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
14355 "2:\n"
14356 ".section .fixup,\"ax\"\n"
14357 @@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
14358 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
14359 {
14360 int err;
14361 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
14362 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
14363 u32 lmask = mask;
14364 u32 hmask = mask >> 32;
14365
14366 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14367 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
14368 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
14369 +#endif
14370 +
14371 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
14372 "2:\n"
14373 ".section .fixup,\"ax\"\n"
14374 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
14375 index 6a564ac..9b1340c 100644
14376 --- a/arch/x86/kernel/acpi/realmode/Makefile
14377 +++ b/arch/x86/kernel/acpi/realmode/Makefile
14378 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
14379 $(call cc-option, -fno-stack-protector) \
14380 $(call cc-option, -mpreferred-stack-boundary=2)
14381 KBUILD_CFLAGS += $(call cc-option, -m32)
14382 +ifdef CONSTIFY_PLUGIN
14383 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
14384 +endif
14385 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
14386 GCOV_PROFILE := n
14387
14388 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
14389 index 580b4e2..d4129e4 100644
14390 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
14391 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
14392 @@ -91,6 +91,9 @@ _start:
14393 /* Do any other stuff... */
14394
14395 #ifndef CONFIG_64BIT
14396 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
14397 + call verify_cpu
14398 +
14399 /* This could also be done in C code... */
14400 movl pmode_cr3, %eax
14401 movl %eax, %cr3
14402 @@ -104,7 +107,7 @@ _start:
14403 movl %eax, %ecx
14404 orl %edx, %ecx
14405 jz 1f
14406 - movl $0xc0000080, %ecx
14407 + mov $MSR_EFER, %ecx
14408 wrmsr
14409 1:
14410
14411 @@ -114,6 +117,7 @@ _start:
14412 movl pmode_cr0, %eax
14413 movl %eax, %cr0
14414 jmp pmode_return
14415 +# include "../../verify_cpu.S"
14416 #else
14417 pushw $0
14418 pushw trampoline_segment
14419 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
14420 index ca93638..7042f24 100644
14421 --- a/arch/x86/kernel/acpi/sleep.c
14422 +++ b/arch/x86/kernel/acpi/sleep.c
14423 @@ -11,11 +11,12 @@
14424 #include <linux/cpumask.h>
14425 #include <asm/segment.h>
14426 #include <asm/desc.h>
14427 +#include <asm/e820.h>
14428
14429 #include "realmode/wakeup.h"
14430 #include "sleep.h"
14431
14432 -unsigned long acpi_wakeup_address;
14433 +unsigned long acpi_wakeup_address = 0x2000;
14434 unsigned long acpi_realmode_flags;
14435
14436 /* address in low memory of the wakeup routine. */
14437 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
14438 #else /* CONFIG_64BIT */
14439 header->trampoline_segment = setup_trampoline() >> 4;
14440 #ifdef CONFIG_SMP
14441 - stack_start.sp = temp_stack + sizeof(temp_stack);
14442 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
14443 +
14444 + pax_open_kernel();
14445 early_gdt_descr.address =
14446 (unsigned long)get_cpu_gdt_table(smp_processor_id());
14447 + pax_close_kernel();
14448 +
14449 initial_gs = per_cpu_offset(smp_processor_id());
14450 #endif
14451 initial_code = (unsigned long)wakeup_long64;
14452 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
14453 return;
14454 }
14455
14456 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
14457 -
14458 - if (!acpi_realmode) {
14459 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
14460 - return;
14461 - }
14462 -
14463 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
14464 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
14465 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
14466 }
14467
14468
14469 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
14470 index 8ded418..079961e 100644
14471 --- a/arch/x86/kernel/acpi/wakeup_32.S
14472 +++ b/arch/x86/kernel/acpi/wakeup_32.S
14473 @@ -30,13 +30,11 @@ wakeup_pmode_return:
14474 # and restore the stack ... but you need gdt for this to work
14475 movl saved_context_esp, %esp
14476
14477 - movl %cs:saved_magic, %eax
14478 - cmpl $0x12345678, %eax
14479 + cmpl $0x12345678, saved_magic
14480 jne bogus_magic
14481
14482 # jump to place where we left off
14483 - movl saved_eip, %eax
14484 - jmp *%eax
14485 + jmp *(saved_eip)
14486
14487 bogus_magic:
14488 jmp bogus_magic
14489 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
14490 index de7353c..075da5f 100644
14491 --- a/arch/x86/kernel/alternative.c
14492 +++ b/arch/x86/kernel/alternative.c
14493 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
14494
14495 BUG_ON(p->len > MAX_PATCH_LEN);
14496 /* prep the buffer with the original instructions */
14497 - memcpy(insnbuf, p->instr, p->len);
14498 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
14499 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
14500 (unsigned long)p->instr, p->len);
14501
14502 @@ -475,7 +475,7 @@ void __init alternative_instructions(void)
14503 if (smp_alt_once)
14504 free_init_pages("SMP alternatives",
14505 (unsigned long)__smp_locks,
14506 - (unsigned long)__smp_locks_end);
14507 + PAGE_ALIGN((unsigned long)__smp_locks_end));
14508
14509 restart_nmi();
14510 }
14511 @@ -492,13 +492,17 @@ void __init alternative_instructions(void)
14512 * instructions. And on the local CPU you need to be protected again NMI or MCE
14513 * handlers seeing an inconsistent instruction while you patch.
14514 */
14515 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
14516 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
14517 size_t len)
14518 {
14519 unsigned long flags;
14520 local_irq_save(flags);
14521 - memcpy(addr, opcode, len);
14522 +
14523 + pax_open_kernel();
14524 + memcpy(ktla_ktva(addr), opcode, len);
14525 sync_core();
14526 + pax_close_kernel();
14527 +
14528 local_irq_restore(flags);
14529 /* Could also do a CLFLUSH here to speed up CPU recovery; but
14530 that causes hangs on some VIA CPUs. */
14531 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
14532 */
14533 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
14534 {
14535 - unsigned long flags;
14536 - char *vaddr;
14537 + unsigned char *vaddr = ktla_ktva(addr);
14538 struct page *pages[2];
14539 - int i;
14540 + size_t i;
14541
14542 if (!core_kernel_text((unsigned long)addr)) {
14543 - pages[0] = vmalloc_to_page(addr);
14544 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
14545 + pages[0] = vmalloc_to_page(vaddr);
14546 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
14547 } else {
14548 - pages[0] = virt_to_page(addr);
14549 + pages[0] = virt_to_page(vaddr);
14550 WARN_ON(!PageReserved(pages[0]));
14551 - pages[1] = virt_to_page(addr + PAGE_SIZE);
14552 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
14553 }
14554 BUG_ON(!pages[0]);
14555 - local_irq_save(flags);
14556 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
14557 - if (pages[1])
14558 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
14559 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
14560 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
14561 - clear_fixmap(FIX_TEXT_POKE0);
14562 - if (pages[1])
14563 - clear_fixmap(FIX_TEXT_POKE1);
14564 - local_flush_tlb();
14565 - sync_core();
14566 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
14567 - that causes hangs on some VIA CPUs. */
14568 + text_poke_early(addr, opcode, len);
14569 for (i = 0; i < len; i++)
14570 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
14571 - local_irq_restore(flags);
14572 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
14573 return addr;
14574 }
14575 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
14576 index 3a44b75..1601800 100644
14577 --- a/arch/x86/kernel/amd_iommu.c
14578 +++ b/arch/x86/kernel/amd_iommu.c
14579 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
14580 }
14581 }
14582
14583 -static struct dma_map_ops amd_iommu_dma_ops = {
14584 +static const struct dma_map_ops amd_iommu_dma_ops = {
14585 .alloc_coherent = alloc_coherent,
14586 .free_coherent = free_coherent,
14587 .map_page = map_page,
14588 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
14589 index 1d2d670..8e3f477 100644
14590 --- a/arch/x86/kernel/apic/apic.c
14591 +++ b/arch/x86/kernel/apic/apic.c
14592 @@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
14593 /*
14594 * Debug level, exported for io_apic.c
14595 */
14596 -unsigned int apic_verbosity;
14597 +int apic_verbosity;
14598
14599 int pic_mode;
14600
14601 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
14602 apic_write(APIC_ESR, 0);
14603 v1 = apic_read(APIC_ESR);
14604 ack_APIC_irq();
14605 - atomic_inc(&irq_err_count);
14606 + atomic_inc_unchecked(&irq_err_count);
14607
14608 /*
14609 * Here is what the APIC error bits mean:
14610 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
14611 u16 *bios_cpu_apicid;
14612 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
14613
14614 + pax_track_stack();
14615 +
14616 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
14617 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
14618
14619 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
14620 index 8928d97..f799cea 100644
14621 --- a/arch/x86/kernel/apic/io_apic.c
14622 +++ b/arch/x86/kernel/apic/io_apic.c
14623 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
14624 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
14625 GFP_ATOMIC);
14626 if (!ioapic_entries)
14627 - return 0;
14628 + return NULL;
14629
14630 for (apic = 0; apic < nr_ioapics; apic++) {
14631 ioapic_entries[apic] =
14632 @@ -733,7 +733,7 @@ nomem:
14633 kfree(ioapic_entries[apic]);
14634 kfree(ioapic_entries);
14635
14636 - return 0;
14637 + return NULL;
14638 }
14639
14640 /*
14641 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
14642 }
14643 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
14644
14645 -void lock_vector_lock(void)
14646 +void lock_vector_lock(void) __acquires(vector_lock)
14647 {
14648 /* Used to the online set of cpus does not change
14649 * during assign_irq_vector.
14650 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
14651 spin_lock(&vector_lock);
14652 }
14653
14654 -void unlock_vector_lock(void)
14655 +void unlock_vector_lock(void) __releases(vector_lock)
14656 {
14657 spin_unlock(&vector_lock);
14658 }
14659 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
14660 ack_APIC_irq();
14661 }
14662
14663 -atomic_t irq_mis_count;
14664 +atomic_unchecked_t irq_mis_count;
14665
14666 static void ack_apic_level(unsigned int irq)
14667 {
14668 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
14669
14670 /* Tail end of version 0x11 I/O APIC bug workaround */
14671 if (!(v & (1 << (i & 0x1f)))) {
14672 - atomic_inc(&irq_mis_count);
14673 + atomic_inc_unchecked(&irq_mis_count);
14674 spin_lock(&ioapic_lock);
14675 __mask_and_edge_IO_APIC_irq(cfg);
14676 __unmask_and_level_IO_APIC_irq(cfg);
14677 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
14678 index 151ace6..f317474 100644
14679 --- a/arch/x86/kernel/apm_32.c
14680 +++ b/arch/x86/kernel/apm_32.c
14681 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
14682 * This is for buggy BIOS's that refer to (real mode) segment 0x40
14683 * even though they are called in protected mode.
14684 */
14685 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
14686 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
14687 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
14688
14689 static const char driver_version[] = "1.16ac"; /* no spaces */
14690 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
14691 BUG_ON(cpu != 0);
14692 gdt = get_cpu_gdt_table(cpu);
14693 save_desc_40 = gdt[0x40 / 8];
14694 +
14695 + pax_open_kernel();
14696 gdt[0x40 / 8] = bad_bios_desc;
14697 + pax_close_kernel();
14698
14699 apm_irq_save(flags);
14700 APM_DO_SAVE_SEGS;
14701 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
14702 &call->esi);
14703 APM_DO_RESTORE_SEGS;
14704 apm_irq_restore(flags);
14705 +
14706 + pax_open_kernel();
14707 gdt[0x40 / 8] = save_desc_40;
14708 + pax_close_kernel();
14709 +
14710 put_cpu();
14711
14712 return call->eax & 0xff;
14713 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
14714 BUG_ON(cpu != 0);
14715 gdt = get_cpu_gdt_table(cpu);
14716 save_desc_40 = gdt[0x40 / 8];
14717 +
14718 + pax_open_kernel();
14719 gdt[0x40 / 8] = bad_bios_desc;
14720 + pax_close_kernel();
14721
14722 apm_irq_save(flags);
14723 APM_DO_SAVE_SEGS;
14724 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
14725 &call->eax);
14726 APM_DO_RESTORE_SEGS;
14727 apm_irq_restore(flags);
14728 +
14729 + pax_open_kernel();
14730 gdt[0x40 / 8] = save_desc_40;
14731 + pax_close_kernel();
14732 +
14733 put_cpu();
14734 return error;
14735 }
14736 @@ -975,7 +989,7 @@ recalc:
14737
14738 static void apm_power_off(void)
14739 {
14740 - unsigned char po_bios_call[] = {
14741 + const unsigned char po_bios_call[] = {
14742 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
14743 0x8e, 0xd0, /* movw ax,ss */
14744 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
14745 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
14746 * code to that CPU.
14747 */
14748 gdt = get_cpu_gdt_table(0);
14749 +
14750 + pax_open_kernel();
14751 set_desc_base(&gdt[APM_CS >> 3],
14752 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14753 set_desc_base(&gdt[APM_CS_16 >> 3],
14754 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14755 set_desc_base(&gdt[APM_DS >> 3],
14756 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14757 + pax_close_kernel();
14758
14759 proc_create("apm", 0, NULL, &apm_file_ops);
14760
14761 diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
14762 index dfdbf64..9b2b6ce 100644
14763 --- a/arch/x86/kernel/asm-offsets_32.c
14764 +++ b/arch/x86/kernel/asm-offsets_32.c
14765 @@ -51,7 +51,6 @@ void foo(void)
14766 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
14767 BLANK();
14768
14769 - OFFSET(TI_task, thread_info, task);
14770 OFFSET(TI_exec_domain, thread_info, exec_domain);
14771 OFFSET(TI_flags, thread_info, flags);
14772 OFFSET(TI_status, thread_info, status);
14773 @@ -60,6 +59,8 @@ void foo(void)
14774 OFFSET(TI_restart_block, thread_info, restart_block);
14775 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
14776 OFFSET(TI_cpu, thread_info, cpu);
14777 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14778 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14779 BLANK();
14780
14781 OFFSET(GDS_size, desc_ptr, size);
14782 @@ -99,6 +100,7 @@ void foo(void)
14783
14784 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14785 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14786 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14787 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
14788 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
14789 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
14790 @@ -115,6 +117,11 @@ void foo(void)
14791 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
14792 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14793 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14794 +
14795 +#ifdef CONFIG_PAX_KERNEXEC
14796 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14797 +#endif
14798 +
14799 #endif
14800
14801 #ifdef CONFIG_XEN
14802 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14803 index 4a6aeed..371de20 100644
14804 --- a/arch/x86/kernel/asm-offsets_64.c
14805 +++ b/arch/x86/kernel/asm-offsets_64.c
14806 @@ -44,6 +44,8 @@ int main(void)
14807 ENTRY(addr_limit);
14808 ENTRY(preempt_count);
14809 ENTRY(status);
14810 + ENTRY(lowest_stack);
14811 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14812 #ifdef CONFIG_IA32_EMULATION
14813 ENTRY(sysenter_return);
14814 #endif
14815 @@ -63,6 +65,18 @@ int main(void)
14816 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14817 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
14818 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14819 +
14820 +#ifdef CONFIG_PAX_KERNEXEC
14821 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14822 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14823 +#endif
14824 +
14825 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14826 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14827 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14828 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14829 +#endif
14830 +
14831 #endif
14832
14833
14834 @@ -115,6 +129,7 @@ int main(void)
14835 ENTRY(cr8);
14836 BLANK();
14837 #undef ENTRY
14838 + DEFINE(TSS_size, sizeof(struct tss_struct));
14839 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
14840 BLANK();
14841 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
14842 @@ -130,6 +145,7 @@ int main(void)
14843
14844 BLANK();
14845 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14846 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14847 #ifdef CONFIG_XEN
14848 BLANK();
14849 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14850 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14851 index ff502cc..dc5133e 100644
14852 --- a/arch/x86/kernel/cpu/Makefile
14853 +++ b/arch/x86/kernel/cpu/Makefile
14854 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
14855 CFLAGS_REMOVE_common.o = -pg
14856 endif
14857
14858 -# Make sure load_percpu_segment has no stackprotector
14859 -nostackp := $(call cc-option, -fno-stack-protector)
14860 -CFLAGS_common.o := $(nostackp)
14861 -
14862 obj-y := intel_cacheinfo.o addon_cpuid_features.o
14863 obj-y += proc.o capflags.o powerflags.o common.o
14864 obj-y += vmware.o hypervisor.o sched.o
14865 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14866 index 6e082dc..a0b5f36 100644
14867 --- a/arch/x86/kernel/cpu/amd.c
14868 +++ b/arch/x86/kernel/cpu/amd.c
14869 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14870 unsigned int size)
14871 {
14872 /* AMD errata T13 (order #21922) */
14873 - if ((c->x86 == 6)) {
14874 + if (c->x86 == 6) {
14875 /* Duron Rev A0 */
14876 if (c->x86_model == 3 && c->x86_mask == 0)
14877 size = 64;
14878 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14879 index 4e34d10..ba6bc97 100644
14880 --- a/arch/x86/kernel/cpu/common.c
14881 +++ b/arch/x86/kernel/cpu/common.c
14882 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14883
14884 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14885
14886 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14887 -#ifdef CONFIG_X86_64
14888 - /*
14889 - * We need valid kernel segments for data and code in long mode too
14890 - * IRET will check the segment types kkeil 2000/10/28
14891 - * Also sysret mandates a special GDT layout
14892 - *
14893 - * TLS descriptors are currently at a different place compared to i386.
14894 - * Hopefully nobody expects them at a fixed place (Wine?)
14895 - */
14896 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14897 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14898 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14899 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14900 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14901 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14902 -#else
14903 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14904 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14905 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14906 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14907 - /*
14908 - * Segments used for calling PnP BIOS have byte granularity.
14909 - * They code segments and data segments have fixed 64k limits,
14910 - * the transfer segment sizes are set at run time.
14911 - */
14912 - /* 32-bit code */
14913 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14914 - /* 16-bit code */
14915 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14916 - /* 16-bit data */
14917 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14918 - /* 16-bit data */
14919 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14920 - /* 16-bit data */
14921 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14922 - /*
14923 - * The APM segments have byte granularity and their bases
14924 - * are set at run time. All have 64k limits.
14925 - */
14926 - /* 32-bit code */
14927 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14928 - /* 16-bit code */
14929 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14930 - /* data */
14931 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14932 -
14933 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14934 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14935 - GDT_STACK_CANARY_INIT
14936 -#endif
14937 -} };
14938 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14939 -
14940 static int __init x86_xsave_setup(char *s)
14941 {
14942 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14943 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
14944 {
14945 struct desc_ptr gdt_descr;
14946
14947 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14948 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14949 gdt_descr.size = GDT_SIZE - 1;
14950 load_gdt(&gdt_descr);
14951 /* Reload the per-cpu base */
14952 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14953 /* Filter out anything that depends on CPUID levels we don't have */
14954 filter_cpuid_features(c, true);
14955
14956 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14957 + setup_clear_cpu_cap(X86_FEATURE_SEP);
14958 +#endif
14959 +
14960 /* If the model name is still unset, do table lookup. */
14961 if (!c->x86_model_id[0]) {
14962 const char *p;
14963 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
14964 }
14965 __setup("clearcpuid=", setup_disablecpuid);
14966
14967 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14968 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
14969 +
14970 #ifdef CONFIG_X86_64
14971 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14972
14973 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14974 EXPORT_PER_CPU_SYMBOL(current_task);
14975
14976 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14977 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14978 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14979 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14980
14981 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14982 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14983 {
14984 memset(regs, 0, sizeof(struct pt_regs));
14985 regs->fs = __KERNEL_PERCPU;
14986 - regs->gs = __KERNEL_STACK_CANARY;
14987 + savesegment(gs, regs->gs);
14988
14989 return regs;
14990 }
14991 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
14992 int i;
14993
14994 cpu = stack_smp_processor_id();
14995 - t = &per_cpu(init_tss, cpu);
14996 + t = init_tss + cpu;
14997 orig_ist = &per_cpu(orig_ist, cpu);
14998
14999 #ifdef CONFIG_NUMA
15000 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
15001 switch_to_new_gdt(cpu);
15002 loadsegment(fs, 0);
15003
15004 - load_idt((const struct desc_ptr *)&idt_descr);
15005 + load_idt(&idt_descr);
15006
15007 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
15008 syscall_init();
15009 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
15010 wrmsrl(MSR_KERNEL_GS_BASE, 0);
15011 barrier();
15012
15013 - check_efer();
15014 if (cpu != 0)
15015 enable_x2apic();
15016
15017 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
15018 {
15019 int cpu = smp_processor_id();
15020 struct task_struct *curr = current;
15021 - struct tss_struct *t = &per_cpu(init_tss, cpu);
15022 + struct tss_struct *t = init_tss + cpu;
15023 struct thread_struct *thread = &curr->thread;
15024
15025 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
15026 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
15027 index 6a77cca..4f4fca0 100644
15028 --- a/arch/x86/kernel/cpu/intel.c
15029 +++ b/arch/x86/kernel/cpu/intel.c
15030 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
15031 * Update the IDT descriptor and reload the IDT so that
15032 * it uses the read-only mapped virtual address.
15033 */
15034 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
15035 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
15036 load_idt(&idt_descr);
15037 }
15038 #endif
15039 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
15040 index 417990f..96dc36b 100644
15041 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
15042 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
15043 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
15044 return ret;
15045 }
15046
15047 -static struct sysfs_ops sysfs_ops = {
15048 +static const struct sysfs_ops sysfs_ops = {
15049 .show = show,
15050 .store = store,
15051 };
15052 diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
15053 index 472763d..aa4d686 100644
15054 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
15055 +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
15056 @@ -178,6 +178,8 @@ static void raise_mce(struct mce *m)
15057
15058 /* Error injection interface */
15059 static ssize_t mce_write(struct file *filp, const char __user *ubuf,
15060 + size_t usize, loff_t *off) __size_overflow(3);
15061 +static ssize_t mce_write(struct file *filp, const char __user *ubuf,
15062 size_t usize, loff_t *off)
15063 {
15064 struct mce m;
15065 @@ -211,7 +213,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
15066 static int inject_init(void)
15067 {
15068 printk(KERN_INFO "Machine check injector initialized\n");
15069 - mce_chrdev_ops.write = mce_write;
15070 + pax_open_kernel();
15071 + *(void **)&mce_chrdev_ops.write = mce_write;
15072 + pax_close_kernel();
15073 register_die_notifier(&mce_raise_nb);
15074 return 0;
15075 }
15076 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
15077 index 0f16a2b..21740f5 100644
15078 --- a/arch/x86/kernel/cpu/mcheck/mce.c
15079 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
15080 @@ -43,6 +43,7 @@
15081 #include <asm/ipi.h>
15082 #include <asm/mce.h>
15083 #include <asm/msr.h>
15084 +#include <asm/local.h>
15085
15086 #include "mce-internal.h"
15087
15088 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
15089 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
15090 m->cs, m->ip);
15091
15092 - if (m->cs == __KERNEL_CS)
15093 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
15094 print_symbol("{%s}", m->ip);
15095 pr_cont("\n");
15096 }
15097 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
15098
15099 #define PANIC_TIMEOUT 5 /* 5 seconds */
15100
15101 -static atomic_t mce_paniced;
15102 +static atomic_unchecked_t mce_paniced;
15103
15104 static int fake_panic;
15105 -static atomic_t mce_fake_paniced;
15106 +static atomic_unchecked_t mce_fake_paniced;
15107
15108 /* Panic in progress. Enable interrupts and wait for final IPI */
15109 static void wait_for_panic(void)
15110 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
15111 /*
15112 * Make sure only one CPU runs in machine check panic
15113 */
15114 - if (atomic_inc_return(&mce_paniced) > 1)
15115 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
15116 wait_for_panic();
15117 barrier();
15118
15119 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
15120 console_verbose();
15121 } else {
15122 /* Don't log too much for fake panic */
15123 - if (atomic_inc_return(&mce_fake_paniced) > 1)
15124 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
15125 return;
15126 }
15127 print_mce_head();
15128 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
15129 * might have been modified by someone else.
15130 */
15131 rmb();
15132 - if (atomic_read(&mce_paniced))
15133 + if (atomic_read_unchecked(&mce_paniced))
15134 wait_for_panic();
15135 if (!monarch_timeout)
15136 goto out;
15137 @@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
15138 }
15139
15140 /* Call the installed machine check handler for this CPU setup. */
15141 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
15142 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
15143 unexpected_machine_check;
15144
15145 /*
15146 @@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
15147 return;
15148 }
15149
15150 + pax_open_kernel();
15151 machine_check_vector = do_machine_check;
15152 + pax_close_kernel();
15153
15154 mce_init();
15155 mce_cpu_features(c);
15156 @@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
15157 */
15158
15159 static DEFINE_SPINLOCK(mce_state_lock);
15160 -static int open_count; /* #times opened */
15161 +static local_t open_count; /* #times opened */
15162 static int open_exclu; /* already open exclusive? */
15163
15164 static int mce_open(struct inode *inode, struct file *file)
15165 {
15166 spin_lock(&mce_state_lock);
15167
15168 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
15169 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
15170 spin_unlock(&mce_state_lock);
15171
15172 return -EBUSY;
15173 @@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
15174
15175 if (file->f_flags & O_EXCL)
15176 open_exclu = 1;
15177 - open_count++;
15178 + local_inc(&open_count);
15179
15180 spin_unlock(&mce_state_lock);
15181
15182 @@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
15183 {
15184 spin_lock(&mce_state_lock);
15185
15186 - open_count--;
15187 + local_dec(&open_count);
15188 open_exclu = 0;
15189
15190 spin_unlock(&mce_state_lock);
15191 @@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
15192 static void mce_reset(void)
15193 {
15194 cpu_missing = 0;
15195 - atomic_set(&mce_fake_paniced, 0);
15196 + atomic_set_unchecked(&mce_fake_paniced, 0);
15197 atomic_set(&mce_executing, 0);
15198 atomic_set(&mce_callin, 0);
15199 atomic_set(&global_nwo, 0);
15200 diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
15201 index ef3cd31..9d2f6ab 100644
15202 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
15203 +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
15204 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
15205 return ret;
15206 }
15207
15208 -static struct sysfs_ops threshold_ops = {
15209 +static const struct sysfs_ops threshold_ops = {
15210 .show = show,
15211 .store = store,
15212 };
15213 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
15214 index 5c0e653..0882b0a 100644
15215 --- a/arch/x86/kernel/cpu/mcheck/p5.c
15216 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
15217 @@ -12,6 +12,7 @@
15218 #include <asm/system.h>
15219 #include <asm/mce.h>
15220 #include <asm/msr.h>
15221 +#include <asm/pgtable.h>
15222
15223 /* By default disabled */
15224 int mce_p5_enabled __read_mostly;
15225 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
15226 if (!cpu_has(c, X86_FEATURE_MCE))
15227 return;
15228
15229 + pax_open_kernel();
15230 machine_check_vector = pentium_machine_check;
15231 + pax_close_kernel();
15232 /* Make sure the vector pointer is visible before we enable MCEs: */
15233 wmb();
15234
15235 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
15236 index 54060f5..c1a7577 100644
15237 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
15238 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
15239 @@ -11,6 +11,7 @@
15240 #include <asm/system.h>
15241 #include <asm/mce.h>
15242 #include <asm/msr.h>
15243 +#include <asm/pgtable.h>
15244
15245 /* Machine check handler for WinChip C6: */
15246 static void winchip_machine_check(struct pt_regs *regs, long error_code)
15247 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
15248 {
15249 u32 lo, hi;
15250
15251 + pax_open_kernel();
15252 machine_check_vector = winchip_machine_check;
15253 + pax_close_kernel();
15254 /* Make sure the vector pointer is visible before we enable MCEs: */
15255 wmb();
15256
15257 diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
15258 index 33af141..92ba9cd 100644
15259 --- a/arch/x86/kernel/cpu/mtrr/amd.c
15260 +++ b/arch/x86/kernel/cpu/mtrr/amd.c
15261 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
15262 return 0;
15263 }
15264
15265 -static struct mtrr_ops amd_mtrr_ops = {
15266 +static const struct mtrr_ops amd_mtrr_ops = {
15267 .vendor = X86_VENDOR_AMD,
15268 .set = amd_set_mtrr,
15269 .get = amd_get_mtrr,
15270 diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
15271 index de89f14..316fe3e 100644
15272 --- a/arch/x86/kernel/cpu/mtrr/centaur.c
15273 +++ b/arch/x86/kernel/cpu/mtrr/centaur.c
15274 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
15275 return 0;
15276 }
15277
15278 -static struct mtrr_ops centaur_mtrr_ops = {
15279 +static const struct mtrr_ops centaur_mtrr_ops = {
15280 .vendor = X86_VENDOR_CENTAUR,
15281 .set = centaur_set_mcr,
15282 .get = centaur_get_mcr,
15283 diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
15284 index 228d982..68a3343 100644
15285 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c
15286 +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
15287 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
15288 post_set();
15289 }
15290
15291 -static struct mtrr_ops cyrix_mtrr_ops = {
15292 +static const struct mtrr_ops cyrix_mtrr_ops = {
15293 .vendor = X86_VENDOR_CYRIX,
15294 .set_all = cyrix_set_all,
15295 .set = cyrix_set_arr,
15296 diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
15297 index 55da0c5..4d75584 100644
15298 --- a/arch/x86/kernel/cpu/mtrr/generic.c
15299 +++ b/arch/x86/kernel/cpu/mtrr/generic.c
15300 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
15301 /*
15302 * Generic structure...
15303 */
15304 -struct mtrr_ops generic_mtrr_ops = {
15305 +const struct mtrr_ops generic_mtrr_ops = {
15306 .use_intel_if = 1,
15307 .set_all = generic_set_all,
15308 .get = generic_get_mtrr,
15309 diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
15310 index 3c1b12d..454f6b6 100644
15311 --- a/arch/x86/kernel/cpu/mtrr/if.c
15312 +++ b/arch/x86/kernel/cpu/mtrr/if.c
15313 @@ -89,6 +89,8 @@ mtrr_file_del(unsigned long base, unsigned long size,
15314 * "base=%Lx size=%Lx type=%s" or "disable=%d"
15315 */
15316 static ssize_t
15317 +mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) __size_overflow(3);
15318 +static ssize_t
15319 mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
15320 {
15321 int i, err;
15322 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
15323 index fd60f09..c94ef52 100644
15324 --- a/arch/x86/kernel/cpu/mtrr/main.c
15325 +++ b/arch/x86/kernel/cpu/mtrr/main.c
15326 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
15327 u64 size_or_mask, size_and_mask;
15328 static bool mtrr_aps_delayed_init;
15329
15330 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
15331 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
15332
15333 -struct mtrr_ops *mtrr_if;
15334 +const struct mtrr_ops *mtrr_if;
15335
15336 static void set_mtrr(unsigned int reg, unsigned long base,
15337 unsigned long size, mtrr_type type);
15338
15339 -void set_mtrr_ops(struct mtrr_ops *ops)
15340 +void set_mtrr_ops(const struct mtrr_ops *ops)
15341 {
15342 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
15343 mtrr_ops[ops->vendor] = ops;
15344 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
15345 index a501dee..816c719 100644
15346 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
15347 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
15348 @@ -25,14 +25,14 @@ struct mtrr_ops {
15349 int (*validate_add_page)(unsigned long base, unsigned long size,
15350 unsigned int type);
15351 int (*have_wrcomb)(void);
15352 -};
15353 +} __do_const;
15354
15355 extern int generic_get_free_region(unsigned long base, unsigned long size,
15356 int replace_reg);
15357 extern int generic_validate_add_page(unsigned long base, unsigned long size,
15358 unsigned int type);
15359
15360 -extern struct mtrr_ops generic_mtrr_ops;
15361 +extern const struct mtrr_ops generic_mtrr_ops;
15362
15363 extern int positive_have_wrcomb(void);
15364
15365 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
15366 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
15367 void get_mtrr_state(void);
15368
15369 -extern void set_mtrr_ops(struct mtrr_ops *ops);
15370 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
15371
15372 extern u64 size_or_mask, size_and_mask;
15373 -extern struct mtrr_ops *mtrr_if;
15374 +extern const struct mtrr_ops *mtrr_if;
15375
15376 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
15377 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
15378 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
15379 index 0ff02ca..fc49a60 100644
15380 --- a/arch/x86/kernel/cpu/perf_event.c
15381 +++ b/arch/x86/kernel/cpu/perf_event.c
15382 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
15383 * count to the generic event atomically:
15384 */
15385 again:
15386 - prev_raw_count = atomic64_read(&hwc->prev_count);
15387 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
15388 rdmsrl(hwc->event_base + idx, new_raw_count);
15389
15390 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
15391 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
15392 new_raw_count) != prev_raw_count)
15393 goto again;
15394
15395 @@ -741,7 +741,7 @@ again:
15396 delta = (new_raw_count << shift) - (prev_raw_count << shift);
15397 delta >>= shift;
15398
15399 - atomic64_add(delta, &event->count);
15400 + atomic64_add_unchecked(delta, &event->count);
15401 atomic64_sub(delta, &hwc->period_left);
15402
15403 return new_raw_count;
15404 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
15405 * The hw event starts counting from this event offset,
15406 * mark it to be able to extra future deltas:
15407 */
15408 - atomic64_set(&hwc->prev_count, (u64)-left);
15409 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
15410
15411 err = checking_wrmsrl(hwc->event_base + idx,
15412 (u64)(-left) & x86_pmu.event_mask);
15413 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
15414 break;
15415
15416 callchain_store(entry, frame.return_address);
15417 - fp = frame.next_frame;
15418 + fp = (__force const void __user *)frame.next_frame;
15419 }
15420 }
15421
15422 diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
15423 index 898df97..9e82503 100644
15424 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c
15425 +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
15426 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
15427
15428 /* Interface defining a CPU specific perfctr watchdog */
15429 struct wd_ops {
15430 - int (*reserve)(void);
15431 - void (*unreserve)(void);
15432 - int (*setup)(unsigned nmi_hz);
15433 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
15434 - void (*stop)(void);
15435 + int (* const reserve)(void);
15436 + void (* const unreserve)(void);
15437 + int (* const setup)(unsigned nmi_hz);
15438 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
15439 + void (* const stop)(void);
15440 unsigned perfctr;
15441 unsigned evntsel;
15442 u64 checkbit;
15443 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
15444 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
15445 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
15446
15447 +/* cannot be const */
15448 static struct wd_ops intel_arch_wd_ops;
15449
15450 static int setup_intel_arch_watchdog(unsigned nmi_hz)
15451 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
15452 return 1;
15453 }
15454
15455 +/* cannot be const */
15456 static struct wd_ops intel_arch_wd_ops __read_mostly = {
15457 .reserve = single_msr_reserve,
15458 .unreserve = single_msr_unreserve,
15459 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
15460 index ff95824..2ffdcb5 100644
15461 --- a/arch/x86/kernel/crash.c
15462 +++ b/arch/x86/kernel/crash.c
15463 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
15464 regs = args->regs;
15465
15466 #ifdef CONFIG_X86_32
15467 - if (!user_mode_vm(regs)) {
15468 + if (!user_mode(regs)) {
15469 crash_fixup_ss_esp(&fixed_regs, regs);
15470 regs = &fixed_regs;
15471 }
15472 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
15473 index 37250fe..bf2ec74 100644
15474 --- a/arch/x86/kernel/doublefault_32.c
15475 +++ b/arch/x86/kernel/doublefault_32.c
15476 @@ -11,7 +11,7 @@
15477
15478 #define DOUBLEFAULT_STACKSIZE (1024)
15479 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
15480 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
15481 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
15482
15483 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
15484
15485 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
15486 unsigned long gdt, tss;
15487
15488 store_gdt(&gdt_desc);
15489 - gdt = gdt_desc.address;
15490 + gdt = (unsigned long)gdt_desc.address;
15491
15492 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
15493
15494 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
15495 /* 0x2 bit is always set */
15496 .flags = X86_EFLAGS_SF | 0x2,
15497 .sp = STACK_START,
15498 - .es = __USER_DS,
15499 + .es = __KERNEL_DS,
15500 .cs = __KERNEL_CS,
15501 .ss = __KERNEL_DS,
15502 - .ds = __USER_DS,
15503 + .ds = __KERNEL_DS,
15504 .fs = __KERNEL_PERCPU,
15505
15506 .__cr3 = __pa_nodebug(swapper_pg_dir),
15507 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
15508 index 2d8a371..4fa6ae6 100644
15509 --- a/arch/x86/kernel/dumpstack.c
15510 +++ b/arch/x86/kernel/dumpstack.c
15511 @@ -2,6 +2,9 @@
15512 * Copyright (C) 1991, 1992 Linus Torvalds
15513 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
15514 */
15515 +#ifdef CONFIG_GRKERNSEC_HIDESYM
15516 +#define __INCLUDED_BY_HIDESYM 1
15517 +#endif
15518 #include <linux/kallsyms.h>
15519 #include <linux/kprobes.h>
15520 #include <linux/uaccess.h>
15521 @@ -28,7 +31,7 @@ static int die_counter;
15522
15523 void printk_address(unsigned long address, int reliable)
15524 {
15525 - printk(" [<%p>] %s%pS\n", (void *) address,
15526 + printk(" [<%p>] %s%pA\n", (void *) address,
15527 reliable ? "" : "? ", (void *) address);
15528 }
15529
15530 @@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
15531 static void
15532 print_ftrace_graph_addr(unsigned long addr, void *data,
15533 const struct stacktrace_ops *ops,
15534 - struct thread_info *tinfo, int *graph)
15535 + struct task_struct *task, int *graph)
15536 {
15537 - struct task_struct *task = tinfo->task;
15538 unsigned long ret_addr;
15539 int index = task->curr_ret_stack;
15540
15541 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15542 static inline void
15543 print_ftrace_graph_addr(unsigned long addr, void *data,
15544 const struct stacktrace_ops *ops,
15545 - struct thread_info *tinfo, int *graph)
15546 + struct task_struct *task, int *graph)
15547 { }
15548 #endif
15549
15550 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15551 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
15552 */
15553
15554 -static inline int valid_stack_ptr(struct thread_info *tinfo,
15555 - void *p, unsigned int size, void *end)
15556 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
15557 {
15558 - void *t = tinfo;
15559 if (end) {
15560 if (p < end && p >= (end-THREAD_SIZE))
15561 return 1;
15562 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
15563 }
15564
15565 unsigned long
15566 -print_context_stack(struct thread_info *tinfo,
15567 +print_context_stack(struct task_struct *task, void *stack_start,
15568 unsigned long *stack, unsigned long bp,
15569 const struct stacktrace_ops *ops, void *data,
15570 unsigned long *end, int *graph)
15571 {
15572 struct stack_frame *frame = (struct stack_frame *)bp;
15573
15574 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
15575 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
15576 unsigned long addr;
15577
15578 addr = *stack;
15579 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
15580 } else {
15581 ops->address(data, addr, 0);
15582 }
15583 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
15584 + print_ftrace_graph_addr(addr, data, ops, task, graph);
15585 }
15586 stack++;
15587 }
15588 @@ -180,7 +180,7 @@ void dump_stack(void)
15589 #endif
15590
15591 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
15592 - current->pid, current->comm, print_tainted(),
15593 + task_pid_nr(current), current->comm, print_tainted(),
15594 init_utsname()->release,
15595 (int)strcspn(init_utsname()->version, " "),
15596 init_utsname()->version);
15597 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
15598 return flags;
15599 }
15600
15601 +extern void gr_handle_kernel_exploit(void);
15602 +
15603 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15604 {
15605 if (regs && kexec_should_crash(current))
15606 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15607 panic("Fatal exception in interrupt");
15608 if (panic_on_oops)
15609 panic("Fatal exception");
15610 - do_exit(signr);
15611 +
15612 + gr_handle_kernel_exploit();
15613 +
15614 + do_group_exit(signr);
15615 }
15616
15617 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
15618 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
15619 unsigned long flags = oops_begin();
15620 int sig = SIGSEGV;
15621
15622 - if (!user_mode_vm(regs))
15623 + if (!user_mode(regs))
15624 report_bug(regs->ip, regs);
15625
15626 if (__die(str, regs, err))
15627 diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
15628 index 81086c2..13e8b17 100644
15629 --- a/arch/x86/kernel/dumpstack.h
15630 +++ b/arch/x86/kernel/dumpstack.h
15631 @@ -15,7 +15,7 @@
15632 #endif
15633
15634 extern unsigned long
15635 -print_context_stack(struct thread_info *tinfo,
15636 +print_context_stack(struct task_struct *task, void *stack_start,
15637 unsigned long *stack, unsigned long bp,
15638 const struct stacktrace_ops *ops, void *data,
15639 unsigned long *end, int *graph);
15640 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
15641 index f7dd2a7..504f53b 100644
15642 --- a/arch/x86/kernel/dumpstack_32.c
15643 +++ b/arch/x86/kernel/dumpstack_32.c
15644 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15645 #endif
15646
15647 for (;;) {
15648 - struct thread_info *context;
15649 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15650 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15651
15652 - context = (struct thread_info *)
15653 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
15654 - bp = print_context_stack(context, stack, bp, ops,
15655 - data, NULL, &graph);
15656 -
15657 - stack = (unsigned long *)context->previous_esp;
15658 - if (!stack)
15659 + if (stack_start == task_stack_page(task))
15660 break;
15661 + stack = *(unsigned long **)stack_start;
15662 if (ops->stack(data, "IRQ") < 0)
15663 break;
15664 touch_nmi_watchdog();
15665 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
15666 * When in-kernel, we also print out the stack and code at the
15667 * time of the fault..
15668 */
15669 - if (!user_mode_vm(regs)) {
15670 + if (!user_mode(regs)) {
15671 unsigned int code_prologue = code_bytes * 43 / 64;
15672 unsigned int code_len = code_bytes;
15673 unsigned char c;
15674 u8 *ip;
15675 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
15676
15677 printk(KERN_EMERG "Stack:\n");
15678 show_stack_log_lvl(NULL, regs, &regs->sp,
15679 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
15680
15681 printk(KERN_EMERG "Code: ");
15682
15683 - ip = (u8 *)regs->ip - code_prologue;
15684 + ip = (u8 *)regs->ip - code_prologue + cs_base;
15685 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
15686 /* try starting at IP */
15687 - ip = (u8 *)regs->ip;
15688 + ip = (u8 *)regs->ip + cs_base;
15689 code_len = code_len - code_prologue + 1;
15690 }
15691 for (i = 0; i < code_len; i++, ip++) {
15692 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
15693 printk(" Bad EIP value.");
15694 break;
15695 }
15696 - if (ip == (u8 *)regs->ip)
15697 + if (ip == (u8 *)regs->ip + cs_base)
15698 printk("<%02x> ", c);
15699 else
15700 printk("%02x ", c);
15701 @@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
15702 printk("\n");
15703 }
15704
15705 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15706 +void pax_check_alloca(unsigned long size)
15707 +{
15708 + unsigned long sp = (unsigned long)&sp, stack_left;
15709 +
15710 + /* all kernel stacks are of the same size */
15711 + stack_left = sp & (THREAD_SIZE - 1);
15712 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
15713 +}
15714 +EXPORT_SYMBOL(pax_check_alloca);
15715 +#endif
15716 +
15717 int is_valid_bugaddr(unsigned long ip)
15718 {
15719 unsigned short ud2;
15720
15721 + ip = ktla_ktva(ip);
15722 if (ip < PAGE_OFFSET)
15723 return 0;
15724 if (probe_kernel_address((unsigned short *)ip, ud2))
15725 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
15726 index a071e6b..36cd585 100644
15727 --- a/arch/x86/kernel/dumpstack_64.c
15728 +++ b/arch/x86/kernel/dumpstack_64.c
15729 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15730 unsigned long *irq_stack_end =
15731 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
15732 unsigned used = 0;
15733 - struct thread_info *tinfo;
15734 int graph = 0;
15735 + void *stack_start;
15736
15737 if (!task)
15738 task = current;
15739 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15740 * current stack address. If the stacks consist of nested
15741 * exceptions
15742 */
15743 - tinfo = task_thread_info(task);
15744 for (;;) {
15745 char *id;
15746 unsigned long *estack_end;
15747 +
15748 estack_end = in_exception_stack(cpu, (unsigned long)stack,
15749 &used, &id);
15750
15751 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15752 if (ops->stack(data, id) < 0)
15753 break;
15754
15755 - bp = print_context_stack(tinfo, stack, bp, ops,
15756 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
15757 data, estack_end, &graph);
15758 ops->stack(data, "<EOE>");
15759 /*
15760 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15761 if (stack >= irq_stack && stack < irq_stack_end) {
15762 if (ops->stack(data, "IRQ") < 0)
15763 break;
15764 - bp = print_context_stack(tinfo, stack, bp,
15765 + bp = print_context_stack(task, irq_stack, stack, bp,
15766 ops, data, irq_stack_end, &graph);
15767 /*
15768 * We link to the next stack (which would be
15769 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15770 /*
15771 * This handles the process stack:
15772 */
15773 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
15774 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15775 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15776 put_cpu();
15777 }
15778 EXPORT_SYMBOL(dump_trace);
15779 @@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
15780 return ud2 == 0x0b0f;
15781 }
15782
15783 +
15784 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15785 +void pax_check_alloca(unsigned long size)
15786 +{
15787 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
15788 + unsigned cpu, used;
15789 + char *id;
15790 +
15791 + /* check the process stack first */
15792 + stack_start = (unsigned long)task_stack_page(current);
15793 + stack_end = stack_start + THREAD_SIZE;
15794 + if (likely(stack_start <= sp && sp < stack_end)) {
15795 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
15796 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
15797 + return;
15798 + }
15799 +
15800 + cpu = get_cpu();
15801 +
15802 + /* check the irq stacks */
15803 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
15804 + stack_start = stack_end - IRQ_STACK_SIZE;
15805 + if (stack_start <= sp && sp < stack_end) {
15806 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
15807 + put_cpu();
15808 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
15809 + return;
15810 + }
15811 +
15812 + /* check the exception stacks */
15813 + used = 0;
15814 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
15815 + stack_start = stack_end - EXCEPTION_STKSZ;
15816 + if (stack_end && stack_start <= sp && sp < stack_end) {
15817 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
15818 + put_cpu();
15819 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
15820 + return;
15821 + }
15822 +
15823 + put_cpu();
15824 +
15825 + /* unknown stack */
15826 + BUG();
15827 +}
15828 +EXPORT_SYMBOL(pax_check_alloca);
15829 +#endif
15830 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
15831 index a89739a..95e0c48 100644
15832 --- a/arch/x86/kernel/e820.c
15833 +++ b/arch/x86/kernel/e820.c
15834 @@ -733,7 +733,7 @@ struct early_res {
15835 };
15836 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
15837 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
15838 - {}
15839 + { 0, 0, {0}, 0 }
15840 };
15841
15842 static int __init find_overlapped_early(u64 start, u64 end)
15843 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
15844 index b9c830c..1e41a96 100644
15845 --- a/arch/x86/kernel/early_printk.c
15846 +++ b/arch/x86/kernel/early_printk.c
15847 @@ -7,6 +7,7 @@
15848 #include <linux/pci_regs.h>
15849 #include <linux/pci_ids.h>
15850 #include <linux/errno.h>
15851 +#include <linux/sched.h>
15852 #include <asm/io.h>
15853 #include <asm/processor.h>
15854 #include <asm/fcntl.h>
15855 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
15856 int n;
15857 va_list ap;
15858
15859 + pax_track_stack();
15860 +
15861 va_start(ap, fmt);
15862 n = vscnprintf(buf, sizeof(buf), fmt, ap);
15863 early_console->write(early_console, buf, n);
15864 diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
15865 index 5cab48e..b025f9b 100644
15866 --- a/arch/x86/kernel/efi_32.c
15867 +++ b/arch/x86/kernel/efi_32.c
15868 @@ -38,70 +38,56 @@
15869 */
15870
15871 static unsigned long efi_rt_eflags;
15872 -static pgd_t efi_bak_pg_dir_pointer[2];
15873 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
15874
15875 -void efi_call_phys_prelog(void)
15876 +void __init efi_call_phys_prelog(void)
15877 {
15878 - unsigned long cr4;
15879 - unsigned long temp;
15880 struct desc_ptr gdt_descr;
15881
15882 +#ifdef CONFIG_PAX_KERNEXEC
15883 + struct desc_struct d;
15884 +#endif
15885 +
15886 local_irq_save(efi_rt_eflags);
15887
15888 - /*
15889 - * If I don't have PAE, I should just duplicate two entries in page
15890 - * directory. If I have PAE, I just need to duplicate one entry in
15891 - * page directory.
15892 - */
15893 - cr4 = read_cr4_safe();
15894 -
15895 - if (cr4 & X86_CR4_PAE) {
15896 - efi_bak_pg_dir_pointer[0].pgd =
15897 - swapper_pg_dir[pgd_index(0)].pgd;
15898 - swapper_pg_dir[0].pgd =
15899 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
15900 - } else {
15901 - efi_bak_pg_dir_pointer[0].pgd =
15902 - swapper_pg_dir[pgd_index(0)].pgd;
15903 - efi_bak_pg_dir_pointer[1].pgd =
15904 - swapper_pg_dir[pgd_index(0x400000)].pgd;
15905 - swapper_pg_dir[pgd_index(0)].pgd =
15906 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
15907 - temp = PAGE_OFFSET + 0x400000;
15908 - swapper_pg_dir[pgd_index(0x400000)].pgd =
15909 - swapper_pg_dir[pgd_index(temp)].pgd;
15910 - }
15911 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
15912 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15913 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
15914
15915 /*
15916 * After the lock is released, the original page table is restored.
15917 */
15918 __flush_tlb_all();
15919
15920 +#ifdef CONFIG_PAX_KERNEXEC
15921 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
15922 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
15923 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
15924 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
15925 +#endif
15926 +
15927 gdt_descr.address = __pa(get_cpu_gdt_table(0));
15928 gdt_descr.size = GDT_SIZE - 1;
15929 load_gdt(&gdt_descr);
15930 }
15931
15932 -void efi_call_phys_epilog(void)
15933 +void __init efi_call_phys_epilog(void)
15934 {
15935 - unsigned long cr4;
15936 struct desc_ptr gdt_descr;
15937
15938 +#ifdef CONFIG_PAX_KERNEXEC
15939 + struct desc_struct d;
15940 +
15941 + memset(&d, 0, sizeof d);
15942 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
15943 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
15944 +#endif
15945 +
15946 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
15947 gdt_descr.size = GDT_SIZE - 1;
15948 load_gdt(&gdt_descr);
15949
15950 - cr4 = read_cr4_safe();
15951 -
15952 - if (cr4 & X86_CR4_PAE) {
15953 - swapper_pg_dir[pgd_index(0)].pgd =
15954 - efi_bak_pg_dir_pointer[0].pgd;
15955 - } else {
15956 - swapper_pg_dir[pgd_index(0)].pgd =
15957 - efi_bak_pg_dir_pointer[0].pgd;
15958 - swapper_pg_dir[pgd_index(0x400000)].pgd =
15959 - efi_bak_pg_dir_pointer[1].pgd;
15960 - }
15961 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
15962
15963 /*
15964 * After the lock is released, the original page table is restored.
15965 diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
15966 index fbe66e6..c5c0dd2 100644
15967 --- a/arch/x86/kernel/efi_stub_32.S
15968 +++ b/arch/x86/kernel/efi_stub_32.S
15969 @@ -6,7 +6,9 @@
15970 */
15971
15972 #include <linux/linkage.h>
15973 +#include <linux/init.h>
15974 #include <asm/page_types.h>
15975 +#include <asm/segment.h>
15976
15977 /*
15978 * efi_call_phys(void *, ...) is a function with variable parameters.
15979 @@ -20,7 +22,7 @@
15980 * service functions will comply with gcc calling convention, too.
15981 */
15982
15983 -.text
15984 +__INIT
15985 ENTRY(efi_call_phys)
15986 /*
15987 * 0. The function can only be called in Linux kernel. So CS has been
15988 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
15989 * The mapping of lower virtual memory has been created in prelog and
15990 * epilog.
15991 */
15992 - movl $1f, %edx
15993 - subl $__PAGE_OFFSET, %edx
15994 - jmp *%edx
15995 + movl $(__KERNEXEC_EFI_DS), %edx
15996 + mov %edx, %ds
15997 + mov %edx, %es
15998 + mov %edx, %ss
15999 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
16000 1:
16001
16002 /*
16003 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
16004 * parameter 2, ..., param n. To make things easy, we save the return
16005 * address of efi_call_phys in a global variable.
16006 */
16007 - popl %edx
16008 - movl %edx, saved_return_addr
16009 - /* get the function pointer into ECX*/
16010 - popl %ecx
16011 - movl %ecx, efi_rt_function_ptr
16012 - movl $2f, %edx
16013 - subl $__PAGE_OFFSET, %edx
16014 - pushl %edx
16015 + popl (saved_return_addr)
16016 + popl (efi_rt_function_ptr)
16017
16018 /*
16019 * 3. Clear PG bit in %CR0.
16020 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
16021 /*
16022 * 5. Call the physical function.
16023 */
16024 - jmp *%ecx
16025 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
16026
16027 -2:
16028 /*
16029 * 6. After EFI runtime service returns, control will return to
16030 * following instruction. We'd better readjust stack pointer first.
16031 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
16032 movl %cr0, %edx
16033 orl $0x80000000, %edx
16034 movl %edx, %cr0
16035 - jmp 1f
16036 -1:
16037 +
16038 /*
16039 * 8. Now restore the virtual mode from flat mode by
16040 * adding EIP with PAGE_OFFSET.
16041 */
16042 - movl $1f, %edx
16043 - jmp *%edx
16044 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
16045 1:
16046 + movl $(__KERNEL_DS), %edx
16047 + mov %edx, %ds
16048 + mov %edx, %es
16049 + mov %edx, %ss
16050
16051 /*
16052 * 9. Balance the stack. And because EAX contain the return value,
16053 * we'd better not clobber it.
16054 */
16055 - leal efi_rt_function_ptr, %edx
16056 - movl (%edx), %ecx
16057 - pushl %ecx
16058 + pushl (efi_rt_function_ptr)
16059
16060 /*
16061 - * 10. Push the saved return address onto the stack and return.
16062 + * 10. Return to the saved return address.
16063 */
16064 - leal saved_return_addr, %edx
16065 - movl (%edx), %ecx
16066 - pushl %ecx
16067 - ret
16068 + jmpl *(saved_return_addr)
16069 ENDPROC(efi_call_phys)
16070 .previous
16071
16072 -.data
16073 +__INITDATA
16074 saved_return_addr:
16075 .long 0
16076 efi_rt_function_ptr:
16077 diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
16078 index 4c07cca..2c8427d 100644
16079 --- a/arch/x86/kernel/efi_stub_64.S
16080 +++ b/arch/x86/kernel/efi_stub_64.S
16081 @@ -7,6 +7,7 @@
16082 */
16083
16084 #include <linux/linkage.h>
16085 +#include <asm/alternative-asm.h>
16086
16087 #define SAVE_XMM \
16088 mov %rsp, %rax; \
16089 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
16090 call *%rdi
16091 addq $32, %rsp
16092 RESTORE_XMM
16093 + pax_force_retaddr 0, 1
16094 ret
16095 ENDPROC(efi_call0)
16096
16097 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
16098 call *%rdi
16099 addq $32, %rsp
16100 RESTORE_XMM
16101 + pax_force_retaddr 0, 1
16102 ret
16103 ENDPROC(efi_call1)
16104
16105 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
16106 call *%rdi
16107 addq $32, %rsp
16108 RESTORE_XMM
16109 + pax_force_retaddr 0, 1
16110 ret
16111 ENDPROC(efi_call2)
16112
16113 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
16114 call *%rdi
16115 addq $32, %rsp
16116 RESTORE_XMM
16117 + pax_force_retaddr 0, 1
16118 ret
16119 ENDPROC(efi_call3)
16120
16121 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
16122 call *%rdi
16123 addq $32, %rsp
16124 RESTORE_XMM
16125 + pax_force_retaddr 0, 1
16126 ret
16127 ENDPROC(efi_call4)
16128
16129 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
16130 call *%rdi
16131 addq $48, %rsp
16132 RESTORE_XMM
16133 + pax_force_retaddr 0, 1
16134 ret
16135 ENDPROC(efi_call5)
16136
16137 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
16138 call *%rdi
16139 addq $48, %rsp
16140 RESTORE_XMM
16141 + pax_force_retaddr 0, 1
16142 ret
16143 ENDPROC(efi_call6)
16144 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
16145 index c097e7d..c689cf4 100644
16146 --- a/arch/x86/kernel/entry_32.S
16147 +++ b/arch/x86/kernel/entry_32.S
16148 @@ -185,13 +185,146 @@
16149 /*CFI_REL_OFFSET gs, PT_GS*/
16150 .endm
16151 .macro SET_KERNEL_GS reg
16152 +
16153 +#ifdef CONFIG_CC_STACKPROTECTOR
16154 movl $(__KERNEL_STACK_CANARY), \reg
16155 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16156 + movl $(__USER_DS), \reg
16157 +#else
16158 + xorl \reg, \reg
16159 +#endif
16160 +
16161 movl \reg, %gs
16162 .endm
16163
16164 #endif /* CONFIG_X86_32_LAZY_GS */
16165
16166 -.macro SAVE_ALL
16167 +.macro pax_enter_kernel
16168 +#ifdef CONFIG_PAX_KERNEXEC
16169 + call pax_enter_kernel
16170 +#endif
16171 +.endm
16172 +
16173 +.macro pax_exit_kernel
16174 +#ifdef CONFIG_PAX_KERNEXEC
16175 + call pax_exit_kernel
16176 +#endif
16177 +.endm
16178 +
16179 +#ifdef CONFIG_PAX_KERNEXEC
16180 +ENTRY(pax_enter_kernel)
16181 +#ifdef CONFIG_PARAVIRT
16182 + pushl %eax
16183 + pushl %ecx
16184 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
16185 + mov %eax, %esi
16186 +#else
16187 + mov %cr0, %esi
16188 +#endif
16189 + bts $16, %esi
16190 + jnc 1f
16191 + mov %cs, %esi
16192 + cmp $__KERNEL_CS, %esi
16193 + jz 3f
16194 + ljmp $__KERNEL_CS, $3f
16195 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
16196 +2:
16197 +#ifdef CONFIG_PARAVIRT
16198 + mov %esi, %eax
16199 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
16200 +#else
16201 + mov %esi, %cr0
16202 +#endif
16203 +3:
16204 +#ifdef CONFIG_PARAVIRT
16205 + popl %ecx
16206 + popl %eax
16207 +#endif
16208 + ret
16209 +ENDPROC(pax_enter_kernel)
16210 +
16211 +ENTRY(pax_exit_kernel)
16212 +#ifdef CONFIG_PARAVIRT
16213 + pushl %eax
16214 + pushl %ecx
16215 +#endif
16216 + mov %cs, %esi
16217 + cmp $__KERNEXEC_KERNEL_CS, %esi
16218 + jnz 2f
16219 +#ifdef CONFIG_PARAVIRT
16220 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
16221 + mov %eax, %esi
16222 +#else
16223 + mov %cr0, %esi
16224 +#endif
16225 + btr $16, %esi
16226 + ljmp $__KERNEL_CS, $1f
16227 +1:
16228 +#ifdef CONFIG_PARAVIRT
16229 + mov %esi, %eax
16230 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
16231 +#else
16232 + mov %esi, %cr0
16233 +#endif
16234 +2:
16235 +#ifdef CONFIG_PARAVIRT
16236 + popl %ecx
16237 + popl %eax
16238 +#endif
16239 + ret
16240 +ENDPROC(pax_exit_kernel)
16241 +#endif
16242 +
16243 +.macro pax_erase_kstack
16244 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16245 + call pax_erase_kstack
16246 +#endif
16247 +.endm
16248 +
16249 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16250 +/*
16251 + * ebp: thread_info
16252 + * ecx, edx: can be clobbered
16253 + */
16254 +ENTRY(pax_erase_kstack)
16255 + pushl %edi
16256 + pushl %eax
16257 +
16258 + mov TI_lowest_stack(%ebp), %edi
16259 + mov $-0xBEEF, %eax
16260 + std
16261 +
16262 +1: mov %edi, %ecx
16263 + and $THREAD_SIZE_asm - 1, %ecx
16264 + shr $2, %ecx
16265 + repne scasl
16266 + jecxz 2f
16267 +
16268 + cmp $2*16, %ecx
16269 + jc 2f
16270 +
16271 + mov $2*16, %ecx
16272 + repe scasl
16273 + jecxz 2f
16274 + jne 1b
16275 +
16276 +2: cld
16277 + mov %esp, %ecx
16278 + sub %edi, %ecx
16279 + shr $2, %ecx
16280 + rep stosl
16281 +
16282 + mov TI_task_thread_sp0(%ebp), %edi
16283 + sub $128, %edi
16284 + mov %edi, TI_lowest_stack(%ebp)
16285 +
16286 + popl %eax
16287 + popl %edi
16288 + ret
16289 +ENDPROC(pax_erase_kstack)
16290 +#endif
16291 +
16292 +.macro __SAVE_ALL _DS
16293 cld
16294 PUSH_GS
16295 pushl %fs
16296 @@ -224,7 +357,7 @@
16297 pushl %ebx
16298 CFI_ADJUST_CFA_OFFSET 4
16299 CFI_REL_OFFSET ebx, 0
16300 - movl $(__USER_DS), %edx
16301 + movl $\_DS, %edx
16302 movl %edx, %ds
16303 movl %edx, %es
16304 movl $(__KERNEL_PERCPU), %edx
16305 @@ -232,6 +365,15 @@
16306 SET_KERNEL_GS %edx
16307 .endm
16308
16309 +.macro SAVE_ALL
16310 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
16311 + __SAVE_ALL __KERNEL_DS
16312 + pax_enter_kernel
16313 +#else
16314 + __SAVE_ALL __USER_DS
16315 +#endif
16316 +.endm
16317 +
16318 .macro RESTORE_INT_REGS
16319 popl %ebx
16320 CFI_ADJUST_CFA_OFFSET -4
16321 @@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
16322 CFI_ADJUST_CFA_OFFSET -4
16323 jmp syscall_exit
16324 CFI_ENDPROC
16325 -END(ret_from_fork)
16326 +ENDPROC(ret_from_fork)
16327
16328 /*
16329 * Return to user mode is not as complex as all this looks,
16330 @@ -352,7 +494,15 @@ check_userspace:
16331 movb PT_CS(%esp), %al
16332 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
16333 cmpl $USER_RPL, %eax
16334 +
16335 +#ifdef CONFIG_PAX_KERNEXEC
16336 + jae resume_userspace
16337 +
16338 + PAX_EXIT_KERNEL
16339 + jmp resume_kernel
16340 +#else
16341 jb resume_kernel # not returning to v8086 or userspace
16342 +#endif
16343
16344 ENTRY(resume_userspace)
16345 LOCKDEP_SYS_EXIT
16346 @@ -364,8 +514,8 @@ ENTRY(resume_userspace)
16347 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
16348 # int/exception return?
16349 jne work_pending
16350 - jmp restore_all
16351 -END(ret_from_exception)
16352 + jmp restore_all_pax
16353 +ENDPROC(ret_from_exception)
16354
16355 #ifdef CONFIG_PREEMPT
16356 ENTRY(resume_kernel)
16357 @@ -380,7 +530,7 @@ need_resched:
16358 jz restore_all
16359 call preempt_schedule_irq
16360 jmp need_resched
16361 -END(resume_kernel)
16362 +ENDPROC(resume_kernel)
16363 #endif
16364 CFI_ENDPROC
16365
16366 @@ -414,25 +564,36 @@ sysenter_past_esp:
16367 /*CFI_REL_OFFSET cs, 0*/
16368 /*
16369 * Push current_thread_info()->sysenter_return to the stack.
16370 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
16371 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
16372 */
16373 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
16374 + pushl $0
16375 CFI_ADJUST_CFA_OFFSET 4
16376 CFI_REL_OFFSET eip, 0
16377
16378 pushl %eax
16379 CFI_ADJUST_CFA_OFFSET 4
16380 SAVE_ALL
16381 + GET_THREAD_INFO(%ebp)
16382 + movl TI_sysenter_return(%ebp),%ebp
16383 + movl %ebp,PT_EIP(%esp)
16384 ENABLE_INTERRUPTS(CLBR_NONE)
16385
16386 /*
16387 * Load the potential sixth argument from user stack.
16388 * Careful about security.
16389 */
16390 + movl PT_OLDESP(%esp),%ebp
16391 +
16392 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16393 + mov PT_OLDSS(%esp),%ds
16394 +1: movl %ds:(%ebp),%ebp
16395 + push %ss
16396 + pop %ds
16397 +#else
16398 cmpl $__PAGE_OFFSET-3,%ebp
16399 jae syscall_fault
16400 1: movl (%ebp),%ebp
16401 +#endif
16402 +
16403 movl %ebp,PT_EBP(%esp)
16404 .section __ex_table,"a"
16405 .align 4
16406 @@ -455,12 +616,24 @@ sysenter_do_call:
16407 testl $_TIF_ALLWORK_MASK, %ecx
16408 jne sysexit_audit
16409 sysenter_exit:
16410 +
16411 +#ifdef CONFIG_PAX_RANDKSTACK
16412 + pushl_cfi %eax
16413 + movl %esp, %eax
16414 + call pax_randomize_kstack
16415 + popl_cfi %eax
16416 +#endif
16417 +
16418 + pax_erase_kstack
16419 +
16420 /* if something modifies registers it must also disable sysexit */
16421 movl PT_EIP(%esp), %edx
16422 movl PT_OLDESP(%esp), %ecx
16423 xorl %ebp,%ebp
16424 TRACE_IRQS_ON
16425 1: mov PT_FS(%esp), %fs
16426 +2: mov PT_DS(%esp), %ds
16427 +3: mov PT_ES(%esp), %es
16428 PTGS_TO_GS
16429 ENABLE_INTERRUPTS_SYSEXIT
16430
16431 @@ -477,6 +650,9 @@ sysenter_audit:
16432 movl %eax,%edx /* 2nd arg: syscall number */
16433 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
16434 call audit_syscall_entry
16435 +
16436 + pax_erase_kstack
16437 +
16438 pushl %ebx
16439 CFI_ADJUST_CFA_OFFSET 4
16440 movl PT_EAX(%esp),%eax /* reload syscall number */
16441 @@ -504,11 +680,17 @@ sysexit_audit:
16442
16443 CFI_ENDPROC
16444 .pushsection .fixup,"ax"
16445 -2: movl $0,PT_FS(%esp)
16446 +4: movl $0,PT_FS(%esp)
16447 + jmp 1b
16448 +5: movl $0,PT_DS(%esp)
16449 + jmp 1b
16450 +6: movl $0,PT_ES(%esp)
16451 jmp 1b
16452 .section __ex_table,"a"
16453 .align 4
16454 - .long 1b,2b
16455 + .long 1b,4b
16456 + .long 2b,5b
16457 + .long 3b,6b
16458 .popsection
16459 PTGS_TO_GS_EX
16460 ENDPROC(ia32_sysenter_target)
16461 @@ -538,6 +720,15 @@ syscall_exit:
16462 testl $_TIF_ALLWORK_MASK, %ecx # current->work
16463 jne syscall_exit_work
16464
16465 +restore_all_pax:
16466 +
16467 +#ifdef CONFIG_PAX_RANDKSTACK
16468 + movl %esp, %eax
16469 + call pax_randomize_kstack
16470 +#endif
16471 +
16472 + pax_erase_kstack
16473 +
16474 restore_all:
16475 TRACE_IRQS_IRET
16476 restore_all_notrace:
16477 @@ -602,10 +793,29 @@ ldt_ss:
16478 mov PT_OLDESP(%esp), %eax /* load userspace esp */
16479 mov %dx, %ax /* eax: new kernel esp */
16480 sub %eax, %edx /* offset (low word is 0) */
16481 - PER_CPU(gdt_page, %ebx)
16482 +#ifdef CONFIG_SMP
16483 + movl PER_CPU_VAR(cpu_number), %ebx
16484 + shll $PAGE_SHIFT_asm, %ebx
16485 + addl $cpu_gdt_table, %ebx
16486 +#else
16487 + movl $cpu_gdt_table, %ebx
16488 +#endif
16489 shr $16, %edx
16490 +
16491 +#ifdef CONFIG_PAX_KERNEXEC
16492 + mov %cr0, %esi
16493 + btr $16, %esi
16494 + mov %esi, %cr0
16495 +#endif
16496 +
16497 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
16498 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
16499 +
16500 +#ifdef CONFIG_PAX_KERNEXEC
16501 + bts $16, %esi
16502 + mov %esi, %cr0
16503 +#endif
16504 +
16505 pushl $__ESPFIX_SS
16506 CFI_ADJUST_CFA_OFFSET 4
16507 push %eax /* new kernel esp */
16508 @@ -636,36 +846,30 @@ work_resched:
16509 movl TI_flags(%ebp), %ecx
16510 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
16511 # than syscall tracing?
16512 - jz restore_all
16513 + jz restore_all_pax
16514 testb $_TIF_NEED_RESCHED, %cl
16515 jnz work_resched
16516
16517 work_notifysig: # deal with pending signals and
16518 # notify-resume requests
16519 + movl %esp, %eax
16520 #ifdef CONFIG_VM86
16521 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
16522 - movl %esp, %eax
16523 - jne work_notifysig_v86 # returning to kernel-space or
16524 + jz 1f # returning to kernel-space or
16525 # vm86-space
16526 - xorl %edx, %edx
16527 - call do_notify_resume
16528 - jmp resume_userspace_sig
16529
16530 - ALIGN
16531 -work_notifysig_v86:
16532 pushl %ecx # save ti_flags for do_notify_resume
16533 CFI_ADJUST_CFA_OFFSET 4
16534 call save_v86_state # %eax contains pt_regs pointer
16535 popl %ecx
16536 CFI_ADJUST_CFA_OFFSET -4
16537 movl %eax, %esp
16538 -#else
16539 - movl %esp, %eax
16540 +1:
16541 #endif
16542 xorl %edx, %edx
16543 call do_notify_resume
16544 jmp resume_userspace_sig
16545 -END(work_pending)
16546 +ENDPROC(work_pending)
16547
16548 # perform syscall exit tracing
16549 ALIGN
16550 @@ -673,11 +877,14 @@ syscall_trace_entry:
16551 movl $-ENOSYS,PT_EAX(%esp)
16552 movl %esp, %eax
16553 call syscall_trace_enter
16554 +
16555 + pax_erase_kstack
16556 +
16557 /* What it returned is what we'll actually use. */
16558 cmpl $(nr_syscalls), %eax
16559 jnae syscall_call
16560 jmp syscall_exit
16561 -END(syscall_trace_entry)
16562 +ENDPROC(syscall_trace_entry)
16563
16564 # perform syscall exit tracing
16565 ALIGN
16566 @@ -690,20 +897,24 @@ syscall_exit_work:
16567 movl %esp, %eax
16568 call syscall_trace_leave
16569 jmp resume_userspace
16570 -END(syscall_exit_work)
16571 +ENDPROC(syscall_exit_work)
16572 CFI_ENDPROC
16573
16574 RING0_INT_FRAME # can't unwind into user space anyway
16575 syscall_fault:
16576 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16577 + push %ss
16578 + pop %ds
16579 +#endif
16580 GET_THREAD_INFO(%ebp)
16581 movl $-EFAULT,PT_EAX(%esp)
16582 jmp resume_userspace
16583 -END(syscall_fault)
16584 +ENDPROC(syscall_fault)
16585
16586 syscall_badsys:
16587 movl $-ENOSYS,PT_EAX(%esp)
16588 jmp resume_userspace
16589 -END(syscall_badsys)
16590 +ENDPROC(syscall_badsys)
16591 CFI_ENDPROC
16592
16593 /*
16594 @@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
16595 PTREGSCALL(vm86)
16596 PTREGSCALL(vm86old)
16597
16598 + ALIGN;
16599 +ENTRY(kernel_execve)
16600 + push %ebp
16601 + sub $PT_OLDSS+4,%esp
16602 + push %edi
16603 + push %ecx
16604 + push %eax
16605 + lea 3*4(%esp),%edi
16606 + mov $PT_OLDSS/4+1,%ecx
16607 + xorl %eax,%eax
16608 + rep stosl
16609 + pop %eax
16610 + pop %ecx
16611 + pop %edi
16612 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
16613 + mov %eax,PT_EBX(%esp)
16614 + mov %edx,PT_ECX(%esp)
16615 + mov %ecx,PT_EDX(%esp)
16616 + mov %esp,%eax
16617 + call sys_execve
16618 + GET_THREAD_INFO(%ebp)
16619 + test %eax,%eax
16620 + jz syscall_exit
16621 + add $PT_OLDSS+4,%esp
16622 + pop %ebp
16623 + ret
16624 +
16625 .macro FIXUP_ESPFIX_STACK
16626 /*
16627 * Switch back for ESPFIX stack to the normal zerobased stack
16628 @@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
16629 * normal stack and adjusts ESP with the matching offset.
16630 */
16631 /* fixup the stack */
16632 - PER_CPU(gdt_page, %ebx)
16633 +#ifdef CONFIG_SMP
16634 + movl PER_CPU_VAR(cpu_number), %ebx
16635 + shll $PAGE_SHIFT_asm, %ebx
16636 + addl $cpu_gdt_table, %ebx
16637 +#else
16638 + movl $cpu_gdt_table, %ebx
16639 +#endif
16640 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
16641 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
16642 shl $16, %eax
16643 @@ -793,7 +1037,7 @@ vector=vector+1
16644 .endr
16645 2: jmp common_interrupt
16646 .endr
16647 -END(irq_entries_start)
16648 +ENDPROC(irq_entries_start)
16649
16650 .previous
16651 END(interrupt)
16652 @@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
16653 CFI_ADJUST_CFA_OFFSET 4
16654 jmp error_code
16655 CFI_ENDPROC
16656 -END(coprocessor_error)
16657 +ENDPROC(coprocessor_error)
16658
16659 ENTRY(simd_coprocessor_error)
16660 RING0_INT_FRAME
16661 @@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
16662 CFI_ADJUST_CFA_OFFSET 4
16663 jmp error_code
16664 CFI_ENDPROC
16665 -END(simd_coprocessor_error)
16666 +ENDPROC(simd_coprocessor_error)
16667
16668 ENTRY(device_not_available)
16669 RING0_INT_FRAME
16670 @@ -860,7 +1104,7 @@ ENTRY(device_not_available)
16671 CFI_ADJUST_CFA_OFFSET 4
16672 jmp error_code
16673 CFI_ENDPROC
16674 -END(device_not_available)
16675 +ENDPROC(device_not_available)
16676
16677 #ifdef CONFIG_PARAVIRT
16678 ENTRY(native_iret)
16679 @@ -869,12 +1113,12 @@ ENTRY(native_iret)
16680 .align 4
16681 .long native_iret, iret_exc
16682 .previous
16683 -END(native_iret)
16684 +ENDPROC(native_iret)
16685
16686 ENTRY(native_irq_enable_sysexit)
16687 sti
16688 sysexit
16689 -END(native_irq_enable_sysexit)
16690 +ENDPROC(native_irq_enable_sysexit)
16691 #endif
16692
16693 ENTRY(overflow)
16694 @@ -885,7 +1129,7 @@ ENTRY(overflow)
16695 CFI_ADJUST_CFA_OFFSET 4
16696 jmp error_code
16697 CFI_ENDPROC
16698 -END(overflow)
16699 +ENDPROC(overflow)
16700
16701 ENTRY(bounds)
16702 RING0_INT_FRAME
16703 @@ -895,7 +1139,7 @@ ENTRY(bounds)
16704 CFI_ADJUST_CFA_OFFSET 4
16705 jmp error_code
16706 CFI_ENDPROC
16707 -END(bounds)
16708 +ENDPROC(bounds)
16709
16710 ENTRY(invalid_op)
16711 RING0_INT_FRAME
16712 @@ -905,7 +1149,7 @@ ENTRY(invalid_op)
16713 CFI_ADJUST_CFA_OFFSET 4
16714 jmp error_code
16715 CFI_ENDPROC
16716 -END(invalid_op)
16717 +ENDPROC(invalid_op)
16718
16719 ENTRY(coprocessor_segment_overrun)
16720 RING0_INT_FRAME
16721 @@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
16722 CFI_ADJUST_CFA_OFFSET 4
16723 jmp error_code
16724 CFI_ENDPROC
16725 -END(coprocessor_segment_overrun)
16726 +ENDPROC(coprocessor_segment_overrun)
16727
16728 ENTRY(invalid_TSS)
16729 RING0_EC_FRAME
16730 @@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
16731 CFI_ADJUST_CFA_OFFSET 4
16732 jmp error_code
16733 CFI_ENDPROC
16734 -END(invalid_TSS)
16735 +ENDPROC(invalid_TSS)
16736
16737 ENTRY(segment_not_present)
16738 RING0_EC_FRAME
16739 @@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
16740 CFI_ADJUST_CFA_OFFSET 4
16741 jmp error_code
16742 CFI_ENDPROC
16743 -END(segment_not_present)
16744 +ENDPROC(segment_not_present)
16745
16746 ENTRY(stack_segment)
16747 RING0_EC_FRAME
16748 @@ -939,7 +1183,7 @@ ENTRY(stack_segment)
16749 CFI_ADJUST_CFA_OFFSET 4
16750 jmp error_code
16751 CFI_ENDPROC
16752 -END(stack_segment)
16753 +ENDPROC(stack_segment)
16754
16755 ENTRY(alignment_check)
16756 RING0_EC_FRAME
16757 @@ -947,7 +1191,7 @@ ENTRY(alignment_check)
16758 CFI_ADJUST_CFA_OFFSET 4
16759 jmp error_code
16760 CFI_ENDPROC
16761 -END(alignment_check)
16762 +ENDPROC(alignment_check)
16763
16764 ENTRY(divide_error)
16765 RING0_INT_FRAME
16766 @@ -957,7 +1201,7 @@ ENTRY(divide_error)
16767 CFI_ADJUST_CFA_OFFSET 4
16768 jmp error_code
16769 CFI_ENDPROC
16770 -END(divide_error)
16771 +ENDPROC(divide_error)
16772
16773 #ifdef CONFIG_X86_MCE
16774 ENTRY(machine_check)
16775 @@ -968,7 +1212,7 @@ ENTRY(machine_check)
16776 CFI_ADJUST_CFA_OFFSET 4
16777 jmp error_code
16778 CFI_ENDPROC
16779 -END(machine_check)
16780 +ENDPROC(machine_check)
16781 #endif
16782
16783 ENTRY(spurious_interrupt_bug)
16784 @@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
16785 CFI_ADJUST_CFA_OFFSET 4
16786 jmp error_code
16787 CFI_ENDPROC
16788 -END(spurious_interrupt_bug)
16789 +ENDPROC(spurious_interrupt_bug)
16790
16791 ENTRY(kernel_thread_helper)
16792 pushl $0 # fake return address for unwinder
16793 @@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
16794
16795 ENTRY(mcount)
16796 ret
16797 -END(mcount)
16798 +ENDPROC(mcount)
16799
16800 ENTRY(ftrace_caller)
16801 cmpl $0, function_trace_stop
16802 @@ -1124,7 +1368,7 @@ ftrace_graph_call:
16803 .globl ftrace_stub
16804 ftrace_stub:
16805 ret
16806 -END(ftrace_caller)
16807 +ENDPROC(ftrace_caller)
16808
16809 #else /* ! CONFIG_DYNAMIC_FTRACE */
16810
16811 @@ -1160,7 +1404,7 @@ trace:
16812 popl %ecx
16813 popl %eax
16814 jmp ftrace_stub
16815 -END(mcount)
16816 +ENDPROC(mcount)
16817 #endif /* CONFIG_DYNAMIC_FTRACE */
16818 #endif /* CONFIG_FUNCTION_TRACER */
16819
16820 @@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
16821 popl %ecx
16822 popl %eax
16823 ret
16824 -END(ftrace_graph_caller)
16825 +ENDPROC(ftrace_graph_caller)
16826
16827 .globl return_to_handler
16828 return_to_handler:
16829 @@ -1198,7 +1442,6 @@ return_to_handler:
16830 ret
16831 #endif
16832
16833 -.section .rodata,"a"
16834 #include "syscall_table_32.S"
16835
16836 syscall_table_size=(.-sys_call_table)
16837 @@ -1255,15 +1498,18 @@ error_code:
16838 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
16839 REG_TO_PTGS %ecx
16840 SET_KERNEL_GS %ecx
16841 - movl $(__USER_DS), %ecx
16842 + movl $(__KERNEL_DS), %ecx
16843 movl %ecx, %ds
16844 movl %ecx, %es
16845 +
16846 + pax_enter_kernel
16847 +
16848 TRACE_IRQS_OFF
16849 movl %esp,%eax # pt_regs pointer
16850 call *%edi
16851 jmp ret_from_exception
16852 CFI_ENDPROC
16853 -END(page_fault)
16854 +ENDPROC(page_fault)
16855
16856 /*
16857 * Debug traps and NMI can happen at the one SYSENTER instruction
16858 @@ -1309,7 +1555,7 @@ debug_stack_correct:
16859 call do_debug
16860 jmp ret_from_exception
16861 CFI_ENDPROC
16862 -END(debug)
16863 +ENDPROC(debug)
16864
16865 /*
16866 * NMI is doubly nasty. It can happen _while_ we're handling
16867 @@ -1351,6 +1597,9 @@ nmi_stack_correct:
16868 xorl %edx,%edx # zero error code
16869 movl %esp,%eax # pt_regs pointer
16870 call do_nmi
16871 +
16872 + pax_exit_kernel
16873 +
16874 jmp restore_all_notrace
16875 CFI_ENDPROC
16876
16877 @@ -1391,12 +1640,15 @@ nmi_espfix_stack:
16878 FIXUP_ESPFIX_STACK # %eax == %esp
16879 xorl %edx,%edx # zero error code
16880 call do_nmi
16881 +
16882 + pax_exit_kernel
16883 +
16884 RESTORE_REGS
16885 lss 12+4(%esp), %esp # back to espfix stack
16886 CFI_ADJUST_CFA_OFFSET -24
16887 jmp irq_return
16888 CFI_ENDPROC
16889 -END(nmi)
16890 +ENDPROC(nmi)
16891
16892 ENTRY(int3)
16893 RING0_INT_FRAME
16894 @@ -1409,7 +1661,7 @@ ENTRY(int3)
16895 call do_int3
16896 jmp ret_from_exception
16897 CFI_ENDPROC
16898 -END(int3)
16899 +ENDPROC(int3)
16900
16901 ENTRY(general_protection)
16902 RING0_EC_FRAME
16903 @@ -1417,7 +1669,7 @@ ENTRY(general_protection)
16904 CFI_ADJUST_CFA_OFFSET 4
16905 jmp error_code
16906 CFI_ENDPROC
16907 -END(general_protection)
16908 +ENDPROC(general_protection)
16909
16910 /*
16911 * End of kprobes section
16912 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
16913 index 34a56a9..74613c5 100644
16914 --- a/arch/x86/kernel/entry_64.S
16915 +++ b/arch/x86/kernel/entry_64.S
16916 @@ -53,6 +53,8 @@
16917 #include <asm/paravirt.h>
16918 #include <asm/ftrace.h>
16919 #include <asm/percpu.h>
16920 +#include <asm/pgtable.h>
16921 +#include <asm/alternative-asm.h>
16922
16923 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
16924 #include <linux/elf-em.h>
16925 @@ -64,8 +66,9 @@
16926 #ifdef CONFIG_FUNCTION_TRACER
16927 #ifdef CONFIG_DYNAMIC_FTRACE
16928 ENTRY(mcount)
16929 + pax_force_retaddr
16930 retq
16931 -END(mcount)
16932 +ENDPROC(mcount)
16933
16934 ENTRY(ftrace_caller)
16935 cmpl $0, function_trace_stop
16936 @@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
16937 #endif
16938
16939 GLOBAL(ftrace_stub)
16940 + pax_force_retaddr
16941 retq
16942 -END(ftrace_caller)
16943 +ENDPROC(ftrace_caller)
16944
16945 #else /* ! CONFIG_DYNAMIC_FTRACE */
16946 ENTRY(mcount)
16947 @@ -108,6 +112,7 @@ ENTRY(mcount)
16948 #endif
16949
16950 GLOBAL(ftrace_stub)
16951 + pax_force_retaddr
16952 retq
16953
16954 trace:
16955 @@ -117,12 +122,13 @@ trace:
16956 movq 8(%rbp), %rsi
16957 subq $MCOUNT_INSN_SIZE, %rdi
16958
16959 + pax_force_fptr ftrace_trace_function
16960 call *ftrace_trace_function
16961
16962 MCOUNT_RESTORE_FRAME
16963
16964 jmp ftrace_stub
16965 -END(mcount)
16966 +ENDPROC(mcount)
16967 #endif /* CONFIG_DYNAMIC_FTRACE */
16968 #endif /* CONFIG_FUNCTION_TRACER */
16969
16970 @@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
16971
16972 MCOUNT_RESTORE_FRAME
16973
16974 + pax_force_retaddr
16975 retq
16976 -END(ftrace_graph_caller)
16977 +ENDPROC(ftrace_graph_caller)
16978
16979 GLOBAL(return_to_handler)
16980 subq $24, %rsp
16981 @@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
16982 movq 8(%rsp), %rdx
16983 movq (%rsp), %rax
16984 addq $16, %rsp
16985 + pax_force_retaddr
16986 retq
16987 #endif
16988
16989 @@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
16990 ENDPROC(native_usergs_sysret64)
16991 #endif /* CONFIG_PARAVIRT */
16992
16993 + .macro ljmpq sel, off
16994 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
16995 + .byte 0x48; ljmp *1234f(%rip)
16996 + .pushsection .rodata
16997 + .align 16
16998 + 1234: .quad \off; .word \sel
16999 + .popsection
17000 +#else
17001 + pushq $\sel
17002 + pushq $\off
17003 + lretq
17004 +#endif
17005 + .endm
17006 +
17007 + .macro pax_enter_kernel
17008 + pax_set_fptr_mask
17009 +#ifdef CONFIG_PAX_KERNEXEC
17010 + call pax_enter_kernel
17011 +#endif
17012 + .endm
17013 +
17014 + .macro pax_exit_kernel
17015 +#ifdef CONFIG_PAX_KERNEXEC
17016 + call pax_exit_kernel
17017 +#endif
17018 + .endm
17019 +
17020 +#ifdef CONFIG_PAX_KERNEXEC
17021 +ENTRY(pax_enter_kernel)
17022 + pushq %rdi
17023 +
17024 +#ifdef CONFIG_PARAVIRT
17025 + PV_SAVE_REGS(CLBR_RDI)
17026 +#endif
17027 +
17028 + GET_CR0_INTO_RDI
17029 + bts $16,%rdi
17030 + jnc 3f
17031 + mov %cs,%edi
17032 + cmp $__KERNEL_CS,%edi
17033 + jnz 2f
17034 +1:
17035 +
17036 +#ifdef CONFIG_PARAVIRT
17037 + PV_RESTORE_REGS(CLBR_RDI)
17038 +#endif
17039 +
17040 + popq %rdi
17041 + pax_force_retaddr
17042 + retq
17043 +
17044 +2: ljmpq __KERNEL_CS,1f
17045 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
17046 +4: SET_RDI_INTO_CR0
17047 + jmp 1b
17048 +ENDPROC(pax_enter_kernel)
17049 +
17050 +ENTRY(pax_exit_kernel)
17051 + pushq %rdi
17052 +
17053 +#ifdef CONFIG_PARAVIRT
17054 + PV_SAVE_REGS(CLBR_RDI)
17055 +#endif
17056 +
17057 + mov %cs,%rdi
17058 + cmp $__KERNEXEC_KERNEL_CS,%edi
17059 + jz 2f
17060 +1:
17061 +
17062 +#ifdef CONFIG_PARAVIRT
17063 + PV_RESTORE_REGS(CLBR_RDI);
17064 +#endif
17065 +
17066 + popq %rdi
17067 + pax_force_retaddr
17068 + retq
17069 +
17070 +2: GET_CR0_INTO_RDI
17071 + btr $16,%rdi
17072 + ljmpq __KERNEL_CS,3f
17073 +3: SET_RDI_INTO_CR0
17074 + jmp 1b
17075 +#ifdef CONFIG_PARAVIRT
17076 + PV_RESTORE_REGS(CLBR_RDI);
17077 +#endif
17078 +
17079 + popq %rdi
17080 + pax_force_retaddr
17081 + retq
17082 +ENDPROC(pax_exit_kernel)
17083 +#endif
17084 +
17085 + .macro pax_enter_kernel_user
17086 + pax_set_fptr_mask
17087 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17088 + call pax_enter_kernel_user
17089 +#endif
17090 + .endm
17091 +
17092 + .macro pax_exit_kernel_user
17093 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17094 + call pax_exit_kernel_user
17095 +#endif
17096 +#ifdef CONFIG_PAX_RANDKSTACK
17097 + pushq %rax
17098 + call pax_randomize_kstack
17099 + popq %rax
17100 +#endif
17101 + .endm
17102 +
17103 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17104 +ENTRY(pax_enter_kernel_user)
17105 + pushq %rdi
17106 + pushq %rbx
17107 +
17108 +#ifdef CONFIG_PARAVIRT
17109 + PV_SAVE_REGS(CLBR_RDI)
17110 +#endif
17111 +
17112 + GET_CR3_INTO_RDI
17113 + mov %rdi,%rbx
17114 + add $__START_KERNEL_map,%rbx
17115 + sub phys_base(%rip),%rbx
17116 +
17117 +#ifdef CONFIG_PARAVIRT
17118 + pushq %rdi
17119 + cmpl $0, pv_info+PARAVIRT_enabled
17120 + jz 1f
17121 + i = 0
17122 + .rept USER_PGD_PTRS
17123 + mov i*8(%rbx),%rsi
17124 + mov $0,%sil
17125 + lea i*8(%rbx),%rdi
17126 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
17127 + i = i + 1
17128 + .endr
17129 + jmp 2f
17130 +1:
17131 +#endif
17132 +
17133 + i = 0
17134 + .rept USER_PGD_PTRS
17135 + movb $0,i*8(%rbx)
17136 + i = i + 1
17137 + .endr
17138 +
17139 +#ifdef CONFIG_PARAVIRT
17140 +2: popq %rdi
17141 +#endif
17142 + SET_RDI_INTO_CR3
17143 +
17144 +#ifdef CONFIG_PAX_KERNEXEC
17145 + GET_CR0_INTO_RDI
17146 + bts $16,%rdi
17147 + SET_RDI_INTO_CR0
17148 +#endif
17149 +
17150 +#ifdef CONFIG_PARAVIRT
17151 + PV_RESTORE_REGS(CLBR_RDI)
17152 +#endif
17153 +
17154 + popq %rbx
17155 + popq %rdi
17156 + pax_force_retaddr
17157 + retq
17158 +ENDPROC(pax_enter_kernel_user)
17159 +
17160 +ENTRY(pax_exit_kernel_user)
17161 + push %rdi
17162 +
17163 +#ifdef CONFIG_PARAVIRT
17164 + pushq %rbx
17165 + PV_SAVE_REGS(CLBR_RDI)
17166 +#endif
17167 +
17168 +#ifdef CONFIG_PAX_KERNEXEC
17169 + GET_CR0_INTO_RDI
17170 + btr $16,%rdi
17171 + SET_RDI_INTO_CR0
17172 +#endif
17173 +
17174 + GET_CR3_INTO_RDI
17175 + add $__START_KERNEL_map,%rdi
17176 + sub phys_base(%rip),%rdi
17177 +
17178 +#ifdef CONFIG_PARAVIRT
17179 + cmpl $0, pv_info+PARAVIRT_enabled
17180 + jz 1f
17181 + mov %rdi,%rbx
17182 + i = 0
17183 + .rept USER_PGD_PTRS
17184 + mov i*8(%rbx),%rsi
17185 + mov $0x67,%sil
17186 + lea i*8(%rbx),%rdi
17187 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
17188 + i = i + 1
17189 + .endr
17190 + jmp 2f
17191 +1:
17192 +#endif
17193 +
17194 + i = 0
17195 + .rept USER_PGD_PTRS
17196 + movb $0x67,i*8(%rdi)
17197 + i = i + 1
17198 + .endr
17199 +
17200 +#ifdef CONFIG_PARAVIRT
17201 +2: PV_RESTORE_REGS(CLBR_RDI)
17202 + popq %rbx
17203 +#endif
17204 +
17205 + popq %rdi
17206 + pax_force_retaddr
17207 + retq
17208 +ENDPROC(pax_exit_kernel_user)
17209 +#endif
17210 +
17211 +.macro pax_erase_kstack
17212 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17213 + call pax_erase_kstack
17214 +#endif
17215 +.endm
17216 +
17217 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17218 +/*
17219 + * r11: thread_info
17220 + * rcx, rdx: can be clobbered
17221 + */
17222 +ENTRY(pax_erase_kstack)
17223 + pushq %rdi
17224 + pushq %rax
17225 + pushq %r11
17226 +
17227 + GET_THREAD_INFO(%r11)
17228 + mov TI_lowest_stack(%r11), %rdi
17229 + mov $-0xBEEF, %rax
17230 + std
17231 +
17232 +1: mov %edi, %ecx
17233 + and $THREAD_SIZE_asm - 1, %ecx
17234 + shr $3, %ecx
17235 + repne scasq
17236 + jecxz 2f
17237 +
17238 + cmp $2*8, %ecx
17239 + jc 2f
17240 +
17241 + mov $2*8, %ecx
17242 + repe scasq
17243 + jecxz 2f
17244 + jne 1b
17245 +
17246 +2: cld
17247 + mov %esp, %ecx
17248 + sub %edi, %ecx
17249 +
17250 + cmp $THREAD_SIZE_asm, %rcx
17251 + jb 3f
17252 + ud2
17253 +3:
17254 +
17255 + shr $3, %ecx
17256 + rep stosq
17257 +
17258 + mov TI_task_thread_sp0(%r11), %rdi
17259 + sub $256, %rdi
17260 + mov %rdi, TI_lowest_stack(%r11)
17261 +
17262 + popq %r11
17263 + popq %rax
17264 + popq %rdi
17265 + pax_force_retaddr
17266 + ret
17267 +ENDPROC(pax_erase_kstack)
17268 +#endif
17269
17270 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
17271 #ifdef CONFIG_TRACE_IRQFLAGS
17272 @@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
17273 .endm
17274
17275 .macro UNFAKE_STACK_FRAME
17276 - addq $8*6, %rsp
17277 - CFI_ADJUST_CFA_OFFSET -(6*8)
17278 + addq $8*6 + ARG_SKIP, %rsp
17279 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
17280 .endm
17281
17282 /*
17283 @@ -317,7 +601,7 @@ ENTRY(save_args)
17284 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
17285 movq_cfi rbp, 8 /* push %rbp */
17286 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
17287 - testl $3, CS(%rdi)
17288 + testb $3, CS(%rdi)
17289 je 1f
17290 SWAPGS
17291 /*
17292 @@ -337,9 +621,10 @@ ENTRY(save_args)
17293 * We entered an interrupt context - irqs are off:
17294 */
17295 2: TRACE_IRQS_OFF
17296 + pax_force_retaddr_bts
17297 ret
17298 CFI_ENDPROC
17299 -END(save_args)
17300 +ENDPROC(save_args)
17301
17302 ENTRY(save_rest)
17303 PARTIAL_FRAME 1 REST_SKIP+8
17304 @@ -352,9 +637,10 @@ ENTRY(save_rest)
17305 movq_cfi r15, R15+16
17306 movq %r11, 8(%rsp) /* return address */
17307 FIXUP_TOP_OF_STACK %r11, 16
17308 + pax_force_retaddr
17309 ret
17310 CFI_ENDPROC
17311 -END(save_rest)
17312 +ENDPROC(save_rest)
17313
17314 /* save complete stack frame */
17315 .pushsection .kprobes.text, "ax"
17316 @@ -383,9 +669,10 @@ ENTRY(save_paranoid)
17317 js 1f /* negative -> in kernel */
17318 SWAPGS
17319 xorl %ebx,%ebx
17320 -1: ret
17321 +1: pax_force_retaddr_bts
17322 + ret
17323 CFI_ENDPROC
17324 -END(save_paranoid)
17325 +ENDPROC(save_paranoid)
17326 .popsection
17327
17328 /*
17329 @@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
17330
17331 RESTORE_REST
17332
17333 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17334 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17335 je int_ret_from_sys_call
17336
17337 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
17338 @@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
17339 jmp ret_from_sys_call # go to the SYSRET fastpath
17340
17341 CFI_ENDPROC
17342 -END(ret_from_fork)
17343 +ENDPROC(ret_from_fork)
17344
17345 /*
17346 * System call entry. Upto 6 arguments in registers are supported.
17347 @@ -455,7 +742,7 @@ END(ret_from_fork)
17348 ENTRY(system_call)
17349 CFI_STARTPROC simple
17350 CFI_SIGNAL_FRAME
17351 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
17352 + CFI_DEF_CFA rsp,0
17353 CFI_REGISTER rip,rcx
17354 /*CFI_REGISTER rflags,r11*/
17355 SWAPGS_UNSAFE_STACK
17356 @@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
17357
17358 movq %rsp,PER_CPU_VAR(old_rsp)
17359 movq PER_CPU_VAR(kernel_stack),%rsp
17360 + SAVE_ARGS 8*6,1
17361 + pax_enter_kernel_user
17362 /*
17363 * No need to follow this irqs off/on section - it's straight
17364 * and short:
17365 */
17366 ENABLE_INTERRUPTS(CLBR_NONE)
17367 - SAVE_ARGS 8,1
17368 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
17369 movq %rcx,RIP-ARGOFFSET(%rsp)
17370 CFI_REL_OFFSET rip,RIP-ARGOFFSET
17371 @@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
17372 system_call_fastpath:
17373 cmpq $__NR_syscall_max,%rax
17374 ja badsys
17375 - movq %r10,%rcx
17376 + movq R10-ARGOFFSET(%rsp),%rcx
17377 call *sys_call_table(,%rax,8) # XXX: rip relative
17378 movq %rax,RAX-ARGOFFSET(%rsp)
17379 /*
17380 @@ -502,6 +790,8 @@ sysret_check:
17381 andl %edi,%edx
17382 jnz sysret_careful
17383 CFI_REMEMBER_STATE
17384 + pax_exit_kernel_user
17385 + pax_erase_kstack
17386 /*
17387 * sysretq will re-enable interrupts:
17388 */
17389 @@ -555,14 +845,18 @@ badsys:
17390 * jump back to the normal fast path.
17391 */
17392 auditsys:
17393 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
17394 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
17395 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
17396 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
17397 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
17398 movq %rax,%rsi /* 2nd arg: syscall number */
17399 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
17400 call audit_syscall_entry
17401 +
17402 + pax_erase_kstack
17403 +
17404 LOAD_ARGS 0 /* reload call-clobbered registers */
17405 + pax_set_fptr_mask
17406 jmp system_call_fastpath
17407
17408 /*
17409 @@ -592,16 +886,20 @@ tracesys:
17410 FIXUP_TOP_OF_STACK %rdi
17411 movq %rsp,%rdi
17412 call syscall_trace_enter
17413 +
17414 + pax_erase_kstack
17415 +
17416 /*
17417 * Reload arg registers from stack in case ptrace changed them.
17418 * We don't reload %rax because syscall_trace_enter() returned
17419 * the value it wants us to use in the table lookup.
17420 */
17421 LOAD_ARGS ARGOFFSET, 1
17422 + pax_set_fptr_mask
17423 RESTORE_REST
17424 cmpq $__NR_syscall_max,%rax
17425 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
17426 - movq %r10,%rcx /* fixup for C */
17427 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
17428 call *sys_call_table(,%rax,8)
17429 movq %rax,RAX-ARGOFFSET(%rsp)
17430 /* Use IRET because user could have changed frame */
17431 @@ -613,7 +911,7 @@ tracesys:
17432 GLOBAL(int_ret_from_sys_call)
17433 DISABLE_INTERRUPTS(CLBR_NONE)
17434 TRACE_IRQS_OFF
17435 - testl $3,CS-ARGOFFSET(%rsp)
17436 + testb $3,CS-ARGOFFSET(%rsp)
17437 je retint_restore_args
17438 movl $_TIF_ALLWORK_MASK,%edi
17439 /* edi: mask to check */
17440 @@ -624,6 +922,7 @@ GLOBAL(int_with_check)
17441 andl %edi,%edx
17442 jnz int_careful
17443 andl $~TS_COMPAT,TI_status(%rcx)
17444 + pax_erase_kstack
17445 jmp retint_swapgs
17446
17447 /* Either reschedule or signal or syscall exit tracking needed. */
17448 @@ -674,7 +973,7 @@ int_restore_rest:
17449 TRACE_IRQS_OFF
17450 jmp int_with_check
17451 CFI_ENDPROC
17452 -END(system_call)
17453 +ENDPROC(system_call)
17454
17455 /*
17456 * Certain special system calls that need to save a complete full stack frame.
17457 @@ -690,7 +989,7 @@ ENTRY(\label)
17458 call \func
17459 jmp ptregscall_common
17460 CFI_ENDPROC
17461 -END(\label)
17462 +ENDPROC(\label)
17463 .endm
17464
17465 PTREGSCALL stub_clone, sys_clone, %r8
17466 @@ -708,9 +1007,10 @@ ENTRY(ptregscall_common)
17467 movq_cfi_restore R12+8, r12
17468 movq_cfi_restore RBP+8, rbp
17469 movq_cfi_restore RBX+8, rbx
17470 + pax_force_retaddr
17471 ret $REST_SKIP /* pop extended registers */
17472 CFI_ENDPROC
17473 -END(ptregscall_common)
17474 +ENDPROC(ptregscall_common)
17475
17476 ENTRY(stub_execve)
17477 CFI_STARTPROC
17478 @@ -726,7 +1026,7 @@ ENTRY(stub_execve)
17479 RESTORE_REST
17480 jmp int_ret_from_sys_call
17481 CFI_ENDPROC
17482 -END(stub_execve)
17483 +ENDPROC(stub_execve)
17484
17485 /*
17486 * sigreturn is special because it needs to restore all registers on return.
17487 @@ -744,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
17488 RESTORE_REST
17489 jmp int_ret_from_sys_call
17490 CFI_ENDPROC
17491 -END(stub_rt_sigreturn)
17492 +ENDPROC(stub_rt_sigreturn)
17493
17494 /*
17495 * Build the entry stubs and pointer table with some assembler magic.
17496 @@ -780,7 +1080,7 @@ vector=vector+1
17497 2: jmp common_interrupt
17498 .endr
17499 CFI_ENDPROC
17500 -END(irq_entries_start)
17501 +ENDPROC(irq_entries_start)
17502
17503 .previous
17504 END(interrupt)
17505 @@ -800,6 +1100,16 @@ END(interrupt)
17506 CFI_ADJUST_CFA_OFFSET 10*8
17507 call save_args
17508 PARTIAL_FRAME 0
17509 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17510 + testb $3, CS(%rdi)
17511 + jnz 1f
17512 + pax_enter_kernel
17513 + jmp 2f
17514 +1: pax_enter_kernel_user
17515 +2:
17516 +#else
17517 + pax_enter_kernel
17518 +#endif
17519 call \func
17520 .endm
17521
17522 @@ -822,7 +1132,7 @@ ret_from_intr:
17523 CFI_ADJUST_CFA_OFFSET -8
17524 exit_intr:
17525 GET_THREAD_INFO(%rcx)
17526 - testl $3,CS-ARGOFFSET(%rsp)
17527 + testb $3,CS-ARGOFFSET(%rsp)
17528 je retint_kernel
17529
17530 /* Interrupt came from user space */
17531 @@ -844,12 +1154,15 @@ retint_swapgs: /* return to user-space */
17532 * The iretq could re-enable interrupts:
17533 */
17534 DISABLE_INTERRUPTS(CLBR_ANY)
17535 + pax_exit_kernel_user
17536 TRACE_IRQS_IRETQ
17537 SWAPGS
17538 jmp restore_args
17539
17540 retint_restore_args: /* return to kernel space */
17541 DISABLE_INTERRUPTS(CLBR_ANY)
17542 + pax_exit_kernel
17543 + pax_force_retaddr RIP-ARGOFFSET
17544 /*
17545 * The iretq could re-enable interrupts:
17546 */
17547 @@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
17548 #endif
17549
17550 CFI_ENDPROC
17551 -END(common_interrupt)
17552 +ENDPROC(common_interrupt)
17553
17554 /*
17555 * APIC interrupts.
17556 @@ -953,7 +1266,7 @@ ENTRY(\sym)
17557 interrupt \do_sym
17558 jmp ret_from_intr
17559 CFI_ENDPROC
17560 -END(\sym)
17561 +ENDPROC(\sym)
17562 .endm
17563
17564 #ifdef CONFIG_SMP
17565 @@ -1032,12 +1345,22 @@ ENTRY(\sym)
17566 CFI_ADJUST_CFA_OFFSET 15*8
17567 call error_entry
17568 DEFAULT_FRAME 0
17569 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17570 + testb $3, CS(%rsp)
17571 + jnz 1f
17572 + pax_enter_kernel
17573 + jmp 2f
17574 +1: pax_enter_kernel_user
17575 +2:
17576 +#else
17577 + pax_enter_kernel
17578 +#endif
17579 movq %rsp,%rdi /* pt_regs pointer */
17580 xorl %esi,%esi /* no error code */
17581 call \do_sym
17582 jmp error_exit /* %ebx: no swapgs flag */
17583 CFI_ENDPROC
17584 -END(\sym)
17585 +ENDPROC(\sym)
17586 .endm
17587
17588 .macro paranoidzeroentry sym do_sym
17589 @@ -1049,12 +1372,22 @@ ENTRY(\sym)
17590 subq $15*8, %rsp
17591 call save_paranoid
17592 TRACE_IRQS_OFF
17593 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17594 + testb $3, CS(%rsp)
17595 + jnz 1f
17596 + pax_enter_kernel
17597 + jmp 2f
17598 +1: pax_enter_kernel_user
17599 +2:
17600 +#else
17601 + pax_enter_kernel
17602 +#endif
17603 movq %rsp,%rdi /* pt_regs pointer */
17604 xorl %esi,%esi /* no error code */
17605 call \do_sym
17606 jmp paranoid_exit /* %ebx: no swapgs flag */
17607 CFI_ENDPROC
17608 -END(\sym)
17609 +ENDPROC(\sym)
17610 .endm
17611
17612 .macro paranoidzeroentry_ist sym do_sym ist
17613 @@ -1066,15 +1399,30 @@ ENTRY(\sym)
17614 subq $15*8, %rsp
17615 call save_paranoid
17616 TRACE_IRQS_OFF
17617 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17618 + testb $3, CS(%rsp)
17619 + jnz 1f
17620 + pax_enter_kernel
17621 + jmp 2f
17622 +1: pax_enter_kernel_user
17623 +2:
17624 +#else
17625 + pax_enter_kernel
17626 +#endif
17627 movq %rsp,%rdi /* pt_regs pointer */
17628 xorl %esi,%esi /* no error code */
17629 - PER_CPU(init_tss, %rbp)
17630 +#ifdef CONFIG_SMP
17631 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
17632 + lea init_tss(%rbp), %rbp
17633 +#else
17634 + lea init_tss(%rip), %rbp
17635 +#endif
17636 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
17637 call \do_sym
17638 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
17639 jmp paranoid_exit /* %ebx: no swapgs flag */
17640 CFI_ENDPROC
17641 -END(\sym)
17642 +ENDPROC(\sym)
17643 .endm
17644
17645 .macro errorentry sym do_sym
17646 @@ -1085,13 +1433,23 @@ ENTRY(\sym)
17647 CFI_ADJUST_CFA_OFFSET 15*8
17648 call error_entry
17649 DEFAULT_FRAME 0
17650 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17651 + testb $3, CS(%rsp)
17652 + jnz 1f
17653 + pax_enter_kernel
17654 + jmp 2f
17655 +1: pax_enter_kernel_user
17656 +2:
17657 +#else
17658 + pax_enter_kernel
17659 +#endif
17660 movq %rsp,%rdi /* pt_regs pointer */
17661 movq ORIG_RAX(%rsp),%rsi /* get error code */
17662 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17663 call \do_sym
17664 jmp error_exit /* %ebx: no swapgs flag */
17665 CFI_ENDPROC
17666 -END(\sym)
17667 +ENDPROC(\sym)
17668 .endm
17669
17670 /* error code is on the stack already */
17671 @@ -1104,13 +1462,23 @@ ENTRY(\sym)
17672 call save_paranoid
17673 DEFAULT_FRAME 0
17674 TRACE_IRQS_OFF
17675 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17676 + testb $3, CS(%rsp)
17677 + jnz 1f
17678 + pax_enter_kernel
17679 + jmp 2f
17680 +1: pax_enter_kernel_user
17681 +2:
17682 +#else
17683 + pax_enter_kernel
17684 +#endif
17685 movq %rsp,%rdi /* pt_regs pointer */
17686 movq ORIG_RAX(%rsp),%rsi /* get error code */
17687 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17688 call \do_sym
17689 jmp paranoid_exit /* %ebx: no swapgs flag */
17690 CFI_ENDPROC
17691 -END(\sym)
17692 +ENDPROC(\sym)
17693 .endm
17694
17695 zeroentry divide_error do_divide_error
17696 @@ -1141,9 +1509,10 @@ gs_change:
17697 SWAPGS
17698 popf
17699 CFI_ADJUST_CFA_OFFSET -8
17700 + pax_force_retaddr
17701 ret
17702 CFI_ENDPROC
17703 -END(native_load_gs_index)
17704 +ENDPROC(native_load_gs_index)
17705
17706 .section __ex_table,"a"
17707 .align 8
17708 @@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
17709 * of hacks for example to fork off the per-CPU idle tasks.
17710 * [Hopefully no generic code relies on the reschedule -AK]
17711 */
17712 - RESTORE_ALL
17713 + RESTORE_REST
17714 UNFAKE_STACK_FRAME
17715 + pax_force_retaddr
17716 ret
17717 CFI_ENDPROC
17718 -END(kernel_thread)
17719 +ENDPROC(kernel_thread)
17720
17721 ENTRY(child_rip)
17722 pushq $0 # fake return address
17723 @@ -1208,13 +1578,14 @@ ENTRY(child_rip)
17724 */
17725 movq %rdi, %rax
17726 movq %rsi, %rdi
17727 + pax_force_fptr %rax
17728 call *%rax
17729 # exit
17730 mov %eax, %edi
17731 call do_exit
17732 ud2 # padding for call trace
17733 CFI_ENDPROC
17734 -END(child_rip)
17735 +ENDPROC(child_rip)
17736
17737 /*
17738 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
17739 @@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
17740 RESTORE_REST
17741 testq %rax,%rax
17742 je int_ret_from_sys_call
17743 - RESTORE_ARGS
17744 UNFAKE_STACK_FRAME
17745 + pax_force_retaddr
17746 ret
17747 CFI_ENDPROC
17748 -END(kernel_execve)
17749 +ENDPROC(kernel_execve)
17750
17751 /* Call softirq on interrupt stack. Interrupts are off. */
17752 ENTRY(call_softirq)
17753 @@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
17754 CFI_DEF_CFA_REGISTER rsp
17755 CFI_ADJUST_CFA_OFFSET -8
17756 decl PER_CPU_VAR(irq_count)
17757 + pax_force_retaddr
17758 ret
17759 CFI_ENDPROC
17760 -END(call_softirq)
17761 +ENDPROC(call_softirq)
17762
17763 #ifdef CONFIG_XEN
17764 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
17765 @@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
17766 decl PER_CPU_VAR(irq_count)
17767 jmp error_exit
17768 CFI_ENDPROC
17769 -END(xen_do_hypervisor_callback)
17770 +ENDPROC(xen_do_hypervisor_callback)
17771
17772 /*
17773 * Hypervisor uses this for application faults while it executes.
17774 @@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
17775 SAVE_ALL
17776 jmp error_exit
17777 CFI_ENDPROC
17778 -END(xen_failsafe_callback)
17779 +ENDPROC(xen_failsafe_callback)
17780
17781 #endif /* CONFIG_XEN */
17782
17783 @@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
17784 TRACE_IRQS_OFF
17785 testl %ebx,%ebx /* swapgs needed? */
17786 jnz paranoid_restore
17787 - testl $3,CS(%rsp)
17788 + testb $3,CS(%rsp)
17789 jnz paranoid_userspace
17790 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17791 + pax_exit_kernel
17792 + TRACE_IRQS_IRETQ 0
17793 + SWAPGS_UNSAFE_STACK
17794 + RESTORE_ALL 8
17795 + pax_force_retaddr_bts
17796 + jmp irq_return
17797 +#endif
17798 paranoid_swapgs:
17799 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17800 + pax_exit_kernel_user
17801 +#else
17802 + pax_exit_kernel
17803 +#endif
17804 TRACE_IRQS_IRETQ 0
17805 SWAPGS_UNSAFE_STACK
17806 RESTORE_ALL 8
17807 jmp irq_return
17808 paranoid_restore:
17809 + pax_exit_kernel
17810 TRACE_IRQS_IRETQ 0
17811 RESTORE_ALL 8
17812 + pax_force_retaddr_bts
17813 jmp irq_return
17814 paranoid_userspace:
17815 GET_THREAD_INFO(%rcx)
17816 @@ -1443,7 +1830,7 @@ paranoid_schedule:
17817 TRACE_IRQS_OFF
17818 jmp paranoid_userspace
17819 CFI_ENDPROC
17820 -END(paranoid_exit)
17821 +ENDPROC(paranoid_exit)
17822
17823 /*
17824 * Exception entry point. This expects an error code/orig_rax on the stack.
17825 @@ -1470,12 +1857,13 @@ ENTRY(error_entry)
17826 movq_cfi r14, R14+8
17827 movq_cfi r15, R15+8
17828 xorl %ebx,%ebx
17829 - testl $3,CS+8(%rsp)
17830 + testb $3,CS+8(%rsp)
17831 je error_kernelspace
17832 error_swapgs:
17833 SWAPGS
17834 error_sti:
17835 TRACE_IRQS_OFF
17836 + pax_force_retaddr_bts
17837 ret
17838 CFI_ENDPROC
17839
17840 @@ -1497,7 +1885,7 @@ error_kernelspace:
17841 cmpq $gs_change,RIP+8(%rsp)
17842 je error_swapgs
17843 jmp error_sti
17844 -END(error_entry)
17845 +ENDPROC(error_entry)
17846
17847
17848 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
17849 @@ -1517,7 +1905,7 @@ ENTRY(error_exit)
17850 jnz retint_careful
17851 jmp retint_swapgs
17852 CFI_ENDPROC
17853 -END(error_exit)
17854 +ENDPROC(error_exit)
17855
17856
17857 /* runs on exception stack */
17858 @@ -1529,6 +1917,16 @@ ENTRY(nmi)
17859 CFI_ADJUST_CFA_OFFSET 15*8
17860 call save_paranoid
17861 DEFAULT_FRAME 0
17862 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17863 + testb $3, CS(%rsp)
17864 + jnz 1f
17865 + pax_enter_kernel
17866 + jmp 2f
17867 +1: pax_enter_kernel_user
17868 +2:
17869 +#else
17870 + pax_enter_kernel
17871 +#endif
17872 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
17873 movq %rsp,%rdi
17874 movq $-1,%rsi
17875 @@ -1539,12 +1937,28 @@ ENTRY(nmi)
17876 DISABLE_INTERRUPTS(CLBR_NONE)
17877 testl %ebx,%ebx /* swapgs needed? */
17878 jnz nmi_restore
17879 - testl $3,CS(%rsp)
17880 + testb $3,CS(%rsp)
17881 jnz nmi_userspace
17882 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17883 + pax_exit_kernel
17884 + SWAPGS_UNSAFE_STACK
17885 + RESTORE_ALL 8
17886 + pax_force_retaddr_bts
17887 + jmp irq_return
17888 +#endif
17889 nmi_swapgs:
17890 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17891 + pax_exit_kernel_user
17892 +#else
17893 + pax_exit_kernel
17894 +#endif
17895 SWAPGS_UNSAFE_STACK
17896 + RESTORE_ALL 8
17897 + jmp irq_return
17898 nmi_restore:
17899 + pax_exit_kernel
17900 RESTORE_ALL 8
17901 + pax_force_retaddr_bts
17902 jmp irq_return
17903 nmi_userspace:
17904 GET_THREAD_INFO(%rcx)
17905 @@ -1573,14 +1987,14 @@ nmi_schedule:
17906 jmp paranoid_exit
17907 CFI_ENDPROC
17908 #endif
17909 -END(nmi)
17910 +ENDPROC(nmi)
17911
17912 ENTRY(ignore_sysret)
17913 CFI_STARTPROC
17914 mov $-ENOSYS,%eax
17915 sysret
17916 CFI_ENDPROC
17917 -END(ignore_sysret)
17918 +ENDPROC(ignore_sysret)
17919
17920 /*
17921 * End of kprobes section
17922 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
17923 index 9dbb527..7b3615a 100644
17924 --- a/arch/x86/kernel/ftrace.c
17925 +++ b/arch/x86/kernel/ftrace.c
17926 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
17927 static void *mod_code_newcode; /* holds the text to write to the IP */
17928
17929 static unsigned nmi_wait_count;
17930 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
17931 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
17932
17933 int ftrace_arch_read_dyn_info(char *buf, int size)
17934 {
17935 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
17936
17937 r = snprintf(buf, size, "%u %u",
17938 nmi_wait_count,
17939 - atomic_read(&nmi_update_count));
17940 + atomic_read_unchecked(&nmi_update_count));
17941 return r;
17942 }
17943
17944 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
17945 {
17946 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
17947 smp_rmb();
17948 + pax_open_kernel();
17949 ftrace_mod_code();
17950 - atomic_inc(&nmi_update_count);
17951 + pax_close_kernel();
17952 + atomic_inc_unchecked(&nmi_update_count);
17953 }
17954 /* Must have previous changes seen before executions */
17955 smp_mb();
17956 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
17957
17958
17959
17960 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
17961 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
17962
17963 static unsigned char *ftrace_nop_replace(void)
17964 {
17965 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
17966 {
17967 unsigned char replaced[MCOUNT_INSN_SIZE];
17968
17969 + ip = ktla_ktva(ip);
17970 +
17971 /*
17972 * Note: Due to modules and __init, code can
17973 * disappear and change, we need to protect against faulting
17974 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
17975 unsigned char old[MCOUNT_INSN_SIZE], *new;
17976 int ret;
17977
17978 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
17979 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
17980 new = ftrace_call_replace(ip, (unsigned long)func);
17981 ret = ftrace_modify_code(ip, old, new);
17982
17983 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
17984 switch (faulted) {
17985 case 0:
17986 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
17987 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
17988 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
17989 break;
17990 case 1:
17991 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
17992 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
17993 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
17994 break;
17995 case 2:
17996 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
17997 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
17998 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
17999 break;
18000 }
18001
18002 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
18003 {
18004 unsigned char code[MCOUNT_INSN_SIZE];
18005
18006 + ip = ktla_ktva(ip);
18007 +
18008 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
18009 return -EFAULT;
18010
18011 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
18012 index 4f8e250..df24706 100644
18013 --- a/arch/x86/kernel/head32.c
18014 +++ b/arch/x86/kernel/head32.c
18015 @@ -16,6 +16,7 @@
18016 #include <asm/apic.h>
18017 #include <asm/io_apic.h>
18018 #include <asm/bios_ebda.h>
18019 +#include <asm/boot.h>
18020
18021 static void __init i386_default_early_setup(void)
18022 {
18023 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
18024 {
18025 reserve_trampoline_memory();
18026
18027 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
18028 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
18029
18030 #ifdef CONFIG_BLK_DEV_INITRD
18031 /* Reserve INITRD */
18032 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
18033 index 34c3308..6fc4e76 100644
18034 --- a/arch/x86/kernel/head_32.S
18035 +++ b/arch/x86/kernel/head_32.S
18036 @@ -19,10 +19,17 @@
18037 #include <asm/setup.h>
18038 #include <asm/processor-flags.h>
18039 #include <asm/percpu.h>
18040 +#include <asm/msr-index.h>
18041
18042 /* Physical address */
18043 #define pa(X) ((X) - __PAGE_OFFSET)
18044
18045 +#ifdef CONFIG_PAX_KERNEXEC
18046 +#define ta(X) (X)
18047 +#else
18048 +#define ta(X) ((X) - __PAGE_OFFSET)
18049 +#endif
18050 +
18051 /*
18052 * References to members of the new_cpu_data structure.
18053 */
18054 @@ -52,11 +59,7 @@
18055 * and small than max_low_pfn, otherwise will waste some page table entries
18056 */
18057
18058 -#if PTRS_PER_PMD > 1
18059 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
18060 -#else
18061 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
18062 -#endif
18063 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
18064
18065 /* Enough space to fit pagetables for the low memory linear map */
18066 MAPPING_BEYOND_END = \
18067 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
18068 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
18069
18070 /*
18071 + * Real beginning of normal "text" segment
18072 + */
18073 +ENTRY(stext)
18074 +ENTRY(_stext)
18075 +
18076 +/*
18077 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
18078 * %esi points to the real-mode code as a 32-bit pointer.
18079 * CS and DS must be 4 GB flat segments, but we don't depend on
18080 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
18081 * can.
18082 */
18083 __HEAD
18084 +
18085 +#ifdef CONFIG_PAX_KERNEXEC
18086 + jmp startup_32
18087 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
18088 +.fill PAGE_SIZE-5,1,0xcc
18089 +#endif
18090 +
18091 ENTRY(startup_32)
18092 + movl pa(stack_start),%ecx
18093 +
18094 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
18095 us to not reload segments */
18096 testb $(1<<6), BP_loadflags(%esi)
18097 @@ -95,7 +113,60 @@ ENTRY(startup_32)
18098 movl %eax,%es
18099 movl %eax,%fs
18100 movl %eax,%gs
18101 + movl %eax,%ss
18102 2:
18103 + leal -__PAGE_OFFSET(%ecx),%esp
18104 +
18105 +#ifdef CONFIG_SMP
18106 + movl $pa(cpu_gdt_table),%edi
18107 + movl $__per_cpu_load,%eax
18108 + movw %ax,__KERNEL_PERCPU + 2(%edi)
18109 + rorl $16,%eax
18110 + movb %al,__KERNEL_PERCPU + 4(%edi)
18111 + movb %ah,__KERNEL_PERCPU + 7(%edi)
18112 + movl $__per_cpu_end - 1,%eax
18113 + subl $__per_cpu_start,%eax
18114 + movw %ax,__KERNEL_PERCPU + 0(%edi)
18115 +#endif
18116 +
18117 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18118 + movl $NR_CPUS,%ecx
18119 + movl $pa(cpu_gdt_table),%edi
18120 +1:
18121 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
18122 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
18123 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
18124 + addl $PAGE_SIZE_asm,%edi
18125 + loop 1b
18126 +#endif
18127 +
18128 +#ifdef CONFIG_PAX_KERNEXEC
18129 + movl $pa(boot_gdt),%edi
18130 + movl $__LOAD_PHYSICAL_ADDR,%eax
18131 + movw %ax,__BOOT_CS + 2(%edi)
18132 + rorl $16,%eax
18133 + movb %al,__BOOT_CS + 4(%edi)
18134 + movb %ah,__BOOT_CS + 7(%edi)
18135 + rorl $16,%eax
18136 +
18137 + ljmp $(__BOOT_CS),$1f
18138 +1:
18139 +
18140 + movl $NR_CPUS,%ecx
18141 + movl $pa(cpu_gdt_table),%edi
18142 + addl $__PAGE_OFFSET,%eax
18143 +1:
18144 + movw %ax,__KERNEL_CS + 2(%edi)
18145 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
18146 + rorl $16,%eax
18147 + movb %al,__KERNEL_CS + 4(%edi)
18148 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
18149 + movb %ah,__KERNEL_CS + 7(%edi)
18150 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
18151 + rorl $16,%eax
18152 + addl $PAGE_SIZE_asm,%edi
18153 + loop 1b
18154 +#endif
18155
18156 /*
18157 * Clear BSS first so that there are no surprises...
18158 @@ -140,9 +211,7 @@ ENTRY(startup_32)
18159 cmpl $num_subarch_entries, %eax
18160 jae bad_subarch
18161
18162 - movl pa(subarch_entries)(,%eax,4), %eax
18163 - subl $__PAGE_OFFSET, %eax
18164 - jmp *%eax
18165 + jmp *pa(subarch_entries)(,%eax,4)
18166
18167 bad_subarch:
18168 WEAK(lguest_entry)
18169 @@ -154,10 +223,10 @@ WEAK(xen_entry)
18170 __INITDATA
18171
18172 subarch_entries:
18173 - .long default_entry /* normal x86/PC */
18174 - .long lguest_entry /* lguest hypervisor */
18175 - .long xen_entry /* Xen hypervisor */
18176 - .long default_entry /* Moorestown MID */
18177 + .long ta(default_entry) /* normal x86/PC */
18178 + .long ta(lguest_entry) /* lguest hypervisor */
18179 + .long ta(xen_entry) /* Xen hypervisor */
18180 + .long ta(default_entry) /* Moorestown MID */
18181 num_subarch_entries = (. - subarch_entries) / 4
18182 .previous
18183 #endif /* CONFIG_PARAVIRT */
18184 @@ -218,8 +287,11 @@ default_entry:
18185 movl %eax, pa(max_pfn_mapped)
18186
18187 /* Do early initialization of the fixmap area */
18188 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
18189 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
18190 +#ifdef CONFIG_COMPAT_VDSO
18191 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
18192 +#else
18193 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
18194 +#endif
18195 #else /* Not PAE */
18196
18197 page_pde_offset = (__PAGE_OFFSET >> 20);
18198 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
18199 movl %eax, pa(max_pfn_mapped)
18200
18201 /* Do early initialization of the fixmap area */
18202 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
18203 - movl %eax,pa(swapper_pg_dir+0xffc)
18204 +#ifdef CONFIG_COMPAT_VDSO
18205 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
18206 +#else
18207 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
18208 +#endif
18209 #endif
18210 jmp 3f
18211 /*
18212 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
18213 movl %eax,%es
18214 movl %eax,%fs
18215 movl %eax,%gs
18216 + movl pa(stack_start),%ecx
18217 + movl %eax,%ss
18218 + leal -__PAGE_OFFSET(%ecx),%esp
18219 #endif /* CONFIG_SMP */
18220 3:
18221
18222 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
18223 orl %edx,%eax
18224 movl %eax,%cr4
18225
18226 +#ifdef CONFIG_X86_PAE
18227 btl $5, %eax # check if PAE is enabled
18228 jnc 6f
18229
18230 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
18231 cpuid
18232 cmpl $0x80000000, %eax
18233 jbe 6f
18234 +
18235 + /* Clear bogus XD_DISABLE bits */
18236 + call verify_cpu
18237 +
18238 mov $0x80000001, %eax
18239 cpuid
18240 /* Execute Disable bit supported? */
18241 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
18242 jnc 6f
18243
18244 /* Setup EFER (Extended Feature Enable Register) */
18245 - movl $0xc0000080, %ecx
18246 + movl $MSR_EFER, %ecx
18247 rdmsr
18248
18249 btsl $11, %eax
18250 /* Make changes effective */
18251 wrmsr
18252
18253 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
18254 + movl $1,pa(nx_enabled)
18255 +#endif
18256 +
18257 6:
18258
18259 /*
18260 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
18261 movl %eax,%cr0 /* ..and set paging (PG) bit */
18262 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
18263 1:
18264 - /* Set up the stack pointer */
18265 - lss stack_start,%esp
18266 + /* Shift the stack pointer to a virtual address */
18267 + addl $__PAGE_OFFSET, %esp
18268
18269 /*
18270 * Initialize eflags. Some BIOS's leave bits like NT set. This would
18271 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
18272
18273 #ifdef CONFIG_SMP
18274 cmpb $0, ready
18275 - jz 1f /* Initial CPU cleans BSS */
18276 - jmp checkCPUtype
18277 -1:
18278 + jnz checkCPUtype
18279 #endif /* CONFIG_SMP */
18280
18281 /*
18282 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
18283 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
18284 movl %eax,%ss # after changing gdt.
18285
18286 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
18287 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
18288 movl %eax,%ds
18289 movl %eax,%es
18290
18291 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
18292 */
18293 cmpb $0,ready
18294 jne 1f
18295 - movl $per_cpu__gdt_page,%eax
18296 + movl $cpu_gdt_table,%eax
18297 movl $per_cpu__stack_canary,%ecx
18298 +#ifdef CONFIG_SMP
18299 + addl $__per_cpu_load,%ecx
18300 +#endif
18301 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
18302 shrl $16, %ecx
18303 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
18304 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
18305 1:
18306 -#endif
18307 movl $(__KERNEL_STACK_CANARY),%eax
18308 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
18309 + movl $(__USER_DS),%eax
18310 +#else
18311 + xorl %eax,%eax
18312 +#endif
18313 movl %eax,%gs
18314
18315 xorl %eax,%eax # Clear LDT
18316 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
18317
18318 cld # gcc2 wants the direction flag cleared at all times
18319 pushl $0 # fake return address for unwinder
18320 -#ifdef CONFIG_SMP
18321 - movb ready, %cl
18322 movb $1, ready
18323 - cmpb $0,%cl # the first CPU calls start_kernel
18324 - je 1f
18325 - movl (stack_start), %esp
18326 -1:
18327 -#endif /* CONFIG_SMP */
18328 jmp *(initial_code)
18329
18330 /*
18331 @@ -546,22 +631,22 @@ early_page_fault:
18332 jmp early_fault
18333
18334 early_fault:
18335 - cld
18336 #ifdef CONFIG_PRINTK
18337 + cmpl $1,%ss:early_recursion_flag
18338 + je hlt_loop
18339 + incl %ss:early_recursion_flag
18340 + cld
18341 pusha
18342 movl $(__KERNEL_DS),%eax
18343 movl %eax,%ds
18344 movl %eax,%es
18345 - cmpl $2,early_recursion_flag
18346 - je hlt_loop
18347 - incl early_recursion_flag
18348 movl %cr2,%eax
18349 pushl %eax
18350 pushl %edx /* trapno */
18351 pushl $fault_msg
18352 call printk
18353 +; call dump_stack
18354 #endif
18355 - call dump_stack
18356 hlt_loop:
18357 hlt
18358 jmp hlt_loop
18359 @@ -569,8 +654,11 @@ hlt_loop:
18360 /* This is the default interrupt "handler" :-) */
18361 ALIGN
18362 ignore_int:
18363 - cld
18364 #ifdef CONFIG_PRINTK
18365 + cmpl $2,%ss:early_recursion_flag
18366 + je hlt_loop
18367 + incl %ss:early_recursion_flag
18368 + cld
18369 pushl %eax
18370 pushl %ecx
18371 pushl %edx
18372 @@ -579,9 +667,6 @@ ignore_int:
18373 movl $(__KERNEL_DS),%eax
18374 movl %eax,%ds
18375 movl %eax,%es
18376 - cmpl $2,early_recursion_flag
18377 - je hlt_loop
18378 - incl early_recursion_flag
18379 pushl 16(%esp)
18380 pushl 24(%esp)
18381 pushl 32(%esp)
18382 @@ -600,6 +685,8 @@ ignore_int:
18383 #endif
18384 iret
18385
18386 +#include "verify_cpu.S"
18387 +
18388 __REFDATA
18389 .align 4
18390 ENTRY(initial_code)
18391 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
18392 /*
18393 * BSS section
18394 */
18395 -__PAGE_ALIGNED_BSS
18396 - .align PAGE_SIZE_asm
18397 #ifdef CONFIG_X86_PAE
18398 +.section .swapper_pg_pmd,"a",@progbits
18399 swapper_pg_pmd:
18400 .fill 1024*KPMDS,4,0
18401 #else
18402 +.section .swapper_pg_dir,"a",@progbits
18403 ENTRY(swapper_pg_dir)
18404 .fill 1024,4,0
18405 #endif
18406 +.section .swapper_pg_fixmap,"a",@progbits
18407 swapper_pg_fixmap:
18408 .fill 1024,4,0
18409 #ifdef CONFIG_X86_TRAMPOLINE
18410 +.section .trampoline_pg_dir,"a",@progbits
18411 ENTRY(trampoline_pg_dir)
18412 +#ifdef CONFIG_X86_PAE
18413 + .fill 4,8,0
18414 +#else
18415 .fill 1024,4,0
18416 #endif
18417 +#endif
18418 +
18419 +.section .empty_zero_page,"a",@progbits
18420 ENTRY(empty_zero_page)
18421 .fill 4096,1,0
18422
18423 /*
18424 + * The IDT has to be page-aligned to simplify the Pentium
18425 + * F0 0F bug workaround.. We have a special link segment
18426 + * for this.
18427 + */
18428 +.section .idt,"a",@progbits
18429 +ENTRY(idt_table)
18430 + .fill 256,8,0
18431 +
18432 +/*
18433 * This starts the data section.
18434 */
18435 #ifdef CONFIG_X86_PAE
18436 -__PAGE_ALIGNED_DATA
18437 - /* Page-aligned for the benefit of paravirt? */
18438 - .align PAGE_SIZE_asm
18439 +.section .swapper_pg_dir,"a",@progbits
18440 +
18441 ENTRY(swapper_pg_dir)
18442 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
18443 # if KPMDS == 3
18444 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
18445 # error "Kernel PMDs should be 1, 2 or 3"
18446 # endif
18447 .align PAGE_SIZE_asm /* needs to be page-sized too */
18448 +
18449 +#ifdef CONFIG_PAX_PER_CPU_PGD
18450 +ENTRY(cpu_pgd)
18451 + .rept NR_CPUS
18452 + .fill 4,8,0
18453 + .endr
18454 +#endif
18455 +
18456 #endif
18457
18458 .data
18459 +.balign 4
18460 ENTRY(stack_start)
18461 - .long init_thread_union+THREAD_SIZE
18462 - .long __BOOT_DS
18463 + .long init_thread_union+THREAD_SIZE-8
18464
18465 ready: .byte 0
18466
18467 +.section .rodata,"a",@progbits
18468 early_recursion_flag:
18469 .long 0
18470
18471 @@ -697,7 +809,7 @@ fault_msg:
18472 .word 0 # 32 bit align gdt_desc.address
18473 boot_gdt_descr:
18474 .word __BOOT_DS+7
18475 - .long boot_gdt - __PAGE_OFFSET
18476 + .long pa(boot_gdt)
18477
18478 .word 0 # 32-bit align idt_desc.address
18479 idt_descr:
18480 @@ -708,7 +820,7 @@ idt_descr:
18481 .word 0 # 32 bit align gdt_desc.address
18482 ENTRY(early_gdt_descr)
18483 .word GDT_ENTRIES*8-1
18484 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
18485 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
18486
18487 /*
18488 * The boot_gdt must mirror the equivalent in setup.S and is
18489 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
18490 .align L1_CACHE_BYTES
18491 ENTRY(boot_gdt)
18492 .fill GDT_ENTRY_BOOT_CS,8,0
18493 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
18494 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
18495 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
18496 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
18497 +
18498 + .align PAGE_SIZE_asm
18499 +ENTRY(cpu_gdt_table)
18500 + .rept NR_CPUS
18501 + .quad 0x0000000000000000 /* NULL descriptor */
18502 + .quad 0x0000000000000000 /* 0x0b reserved */
18503 + .quad 0x0000000000000000 /* 0x13 reserved */
18504 + .quad 0x0000000000000000 /* 0x1b reserved */
18505 +
18506 +#ifdef CONFIG_PAX_KERNEXEC
18507 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
18508 +#else
18509 + .quad 0x0000000000000000 /* 0x20 unused */
18510 +#endif
18511 +
18512 + .quad 0x0000000000000000 /* 0x28 unused */
18513 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
18514 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
18515 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
18516 + .quad 0x0000000000000000 /* 0x4b reserved */
18517 + .quad 0x0000000000000000 /* 0x53 reserved */
18518 + .quad 0x0000000000000000 /* 0x5b reserved */
18519 +
18520 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
18521 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
18522 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
18523 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
18524 +
18525 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
18526 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
18527 +
18528 + /*
18529 + * Segments used for calling PnP BIOS have byte granularity.
18530 + * The code segments and data segments have fixed 64k limits,
18531 + * the transfer segment sizes are set at run time.
18532 + */
18533 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
18534 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
18535 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
18536 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
18537 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
18538 +
18539 + /*
18540 + * The APM segments have byte granularity and their bases
18541 + * are set at run time. All have 64k limits.
18542 + */
18543 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
18544 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
18545 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
18546 +
18547 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
18548 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
18549 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
18550 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
18551 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
18552 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
18553 +
18554 + /* Be sure this is zeroed to avoid false validations in Xen */
18555 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
18556 + .endr
18557 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
18558 index 780cd92..758b2a6 100644
18559 --- a/arch/x86/kernel/head_64.S
18560 +++ b/arch/x86/kernel/head_64.S
18561 @@ -19,6 +19,8 @@
18562 #include <asm/cache.h>
18563 #include <asm/processor-flags.h>
18564 #include <asm/percpu.h>
18565 +#include <asm/cpufeature.h>
18566 +#include <asm/alternative-asm.h>
18567
18568 #ifdef CONFIG_PARAVIRT
18569 #include <asm/asm-offsets.h>
18570 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
18571 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
18572 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
18573 L3_START_KERNEL = pud_index(__START_KERNEL_map)
18574 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
18575 +L3_VMALLOC_START = pud_index(VMALLOC_START)
18576 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
18577 +L3_VMALLOC_END = pud_index(VMALLOC_END)
18578 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
18579 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
18580
18581 .text
18582 __HEAD
18583 @@ -85,35 +93,23 @@ startup_64:
18584 */
18585 addq %rbp, init_level4_pgt + 0(%rip)
18586 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
18587 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
18588 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
18589 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
18590 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
18591
18592 addq %rbp, level3_ident_pgt + 0(%rip)
18593 +#ifndef CONFIG_XEN
18594 + addq %rbp, level3_ident_pgt + 8(%rip)
18595 +#endif
18596
18597 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
18598 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
18599 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
18600 +
18601 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
18602 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
18603
18604 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
18605 -
18606 - /* Add an Identity mapping if I am above 1G */
18607 - leaq _text(%rip), %rdi
18608 - andq $PMD_PAGE_MASK, %rdi
18609 -
18610 - movq %rdi, %rax
18611 - shrq $PUD_SHIFT, %rax
18612 - andq $(PTRS_PER_PUD - 1), %rax
18613 - jz ident_complete
18614 -
18615 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
18616 - leaq level3_ident_pgt(%rip), %rbx
18617 - movq %rdx, 0(%rbx, %rax, 8)
18618 -
18619 - movq %rdi, %rax
18620 - shrq $PMD_SHIFT, %rax
18621 - andq $(PTRS_PER_PMD - 1), %rax
18622 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
18623 - leaq level2_spare_pgt(%rip), %rbx
18624 - movq %rdx, 0(%rbx, %rax, 8)
18625 -ident_complete:
18626 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
18627
18628 /*
18629 * Fixup the kernel text+data virtual addresses. Note that
18630 @@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
18631 * after the boot processor executes this code.
18632 */
18633
18634 - /* Enable PAE mode and PGE */
18635 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
18636 + /* Enable PAE mode and PSE/PGE */
18637 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
18638 movq %rax, %cr4
18639
18640 /* Setup early boot stage 4 level pagetables. */
18641 @@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
18642 movl $MSR_EFER, %ecx
18643 rdmsr
18644 btsl $_EFER_SCE, %eax /* Enable System Call */
18645 - btl $20,%edi /* No Execute supported? */
18646 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
18647 jnc 1f
18648 btsl $_EFER_NX, %eax
18649 + leaq init_level4_pgt(%rip), %rdi
18650 +#ifndef CONFIG_EFI
18651 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
18652 +#endif
18653 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
18654 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
18655 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
18656 1: wrmsr /* Make changes effective */
18657
18658 /* Setup cr0 */
18659 @@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
18660 * jump. In addition we need to ensure %cs is set so we make this
18661 * a far return.
18662 */
18663 + pax_set_fptr_mask
18664 movq initial_code(%rip),%rax
18665 pushq $0 # fake return address to stop unwinder
18666 pushq $__KERNEL_CS # set correct cs
18667 @@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
18668 .quad x86_64_start_kernel
18669 ENTRY(initial_gs)
18670 .quad INIT_PER_CPU_VAR(irq_stack_union)
18671 - __FINITDATA
18672
18673 ENTRY(stack_start)
18674 .quad init_thread_union+THREAD_SIZE-8
18675 .word 0
18676 + __FINITDATA
18677
18678 bad_address:
18679 jmp bad_address
18680
18681 - .section ".init.text","ax"
18682 + __INIT
18683 #ifdef CONFIG_EARLY_PRINTK
18684 .globl early_idt_handlers
18685 early_idt_handlers:
18686 @@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
18687 #endif /* EARLY_PRINTK */
18688 1: hlt
18689 jmp 1b
18690 + .previous
18691
18692 #ifdef CONFIG_EARLY_PRINTK
18693 + __INITDATA
18694 early_recursion_flag:
18695 .long 0
18696 + .previous
18697
18698 + .section .rodata,"a",@progbits
18699 early_idt_msg:
18700 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
18701 early_idt_ripmsg:
18702 .asciz "RIP %s\n"
18703 + .previous
18704 #endif /* CONFIG_EARLY_PRINTK */
18705 - .previous
18706
18707 + .section .rodata,"a",@progbits
18708 #define NEXT_PAGE(name) \
18709 .balign PAGE_SIZE; \
18710 ENTRY(name)
18711 @@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
18712 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18713 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
18714 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18715 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
18716 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
18717 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
18718 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
18719 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
18720 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18721 .org init_level4_pgt + L4_START_KERNEL*8, 0
18722 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
18723 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
18724
18725 +#ifdef CONFIG_PAX_PER_CPU_PGD
18726 +NEXT_PAGE(cpu_pgd)
18727 + .rept NR_CPUS
18728 + .fill 512,8,0
18729 + .endr
18730 +#endif
18731 +
18732 NEXT_PAGE(level3_ident_pgt)
18733 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18734 +#ifdef CONFIG_XEN
18735 .fill 511,8,0
18736 +#else
18737 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
18738 + .fill 510,8,0
18739 +#endif
18740 +
18741 +NEXT_PAGE(level3_vmalloc_start_pgt)
18742 + .fill 512,8,0
18743 +
18744 +NEXT_PAGE(level3_vmalloc_end_pgt)
18745 + .fill 512,8,0
18746 +
18747 +NEXT_PAGE(level3_vmemmap_pgt)
18748 + .fill L3_VMEMMAP_START,8,0
18749 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18750
18751 NEXT_PAGE(level3_kernel_pgt)
18752 .fill L3_START_KERNEL,8,0
18753 @@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
18754 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
18755 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18756
18757 +NEXT_PAGE(level2_vmemmap_pgt)
18758 + .fill 512,8,0
18759 +
18760 NEXT_PAGE(level2_fixmap_pgt)
18761 - .fill 506,8,0
18762 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18763 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
18764 - .fill 5,8,0
18765 + .fill 507,8,0
18766 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
18767 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
18768 + .fill 4,8,0
18769
18770 -NEXT_PAGE(level1_fixmap_pgt)
18771 +NEXT_PAGE(level1_vsyscall_pgt)
18772 .fill 512,8,0
18773
18774 -NEXT_PAGE(level2_ident_pgt)
18775 - /* Since I easily can, map the first 1G.
18776 + /* Since I easily can, map the first 2G.
18777 * Don't set NX because code runs from these pages.
18778 */
18779 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
18780 +NEXT_PAGE(level2_ident_pgt)
18781 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
18782
18783 NEXT_PAGE(level2_kernel_pgt)
18784 /*
18785 @@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
18786 * If you want to increase this then increase MODULES_VADDR
18787 * too.)
18788 */
18789 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
18790 - KERNEL_IMAGE_SIZE/PMD_SIZE)
18791 -
18792 -NEXT_PAGE(level2_spare_pgt)
18793 - .fill 512, 8, 0
18794 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
18795
18796 #undef PMDS
18797 #undef NEXT_PAGE
18798
18799 - .data
18800 + .align PAGE_SIZE
18801 +ENTRY(cpu_gdt_table)
18802 + .rept NR_CPUS
18803 + .quad 0x0000000000000000 /* NULL descriptor */
18804 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
18805 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
18806 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
18807 + .quad 0x00cffb000000ffff /* __USER32_CS */
18808 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
18809 + .quad 0x00affb000000ffff /* __USER_CS */
18810 +
18811 +#ifdef CONFIG_PAX_KERNEXEC
18812 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
18813 +#else
18814 + .quad 0x0 /* unused */
18815 +#endif
18816 +
18817 + .quad 0,0 /* TSS */
18818 + .quad 0,0 /* LDT */
18819 + .quad 0,0,0 /* three TLS descriptors */
18820 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
18821 + /* asm/segment.h:GDT_ENTRIES must match this */
18822 +
18823 + /* zero the remaining page */
18824 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
18825 + .endr
18826 +
18827 .align 16
18828 .globl early_gdt_descr
18829 early_gdt_descr:
18830 .word GDT_ENTRIES*8-1
18831 early_gdt_descr_base:
18832 - .quad INIT_PER_CPU_VAR(gdt_page)
18833 + .quad cpu_gdt_table
18834
18835 ENTRY(phys_base)
18836 /* This must match the first entry in level2_kernel_pgt */
18837 .quad 0x0000000000000000
18838
18839 #include "../../x86/xen/xen-head.S"
18840 -
18841 - .section .bss, "aw", @nobits
18842 +
18843 + .section .rodata,"a",@progbits
18844 .align L1_CACHE_BYTES
18845 ENTRY(idt_table)
18846 - .skip IDT_ENTRIES * 16
18847 + .fill 512,8,0
18848
18849 __PAGE_ALIGNED_BSS
18850 .align PAGE_SIZE
18851 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
18852 index 9c3bd4a..e1d9b35 100644
18853 --- a/arch/x86/kernel/i386_ksyms_32.c
18854 +++ b/arch/x86/kernel/i386_ksyms_32.c
18855 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
18856 EXPORT_SYMBOL(cmpxchg8b_emu);
18857 #endif
18858
18859 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
18860 +
18861 /* Networking helper routines. */
18862 EXPORT_SYMBOL(csum_partial_copy_generic);
18863 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
18864 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
18865
18866 EXPORT_SYMBOL(__get_user_1);
18867 EXPORT_SYMBOL(__get_user_2);
18868 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
18869
18870 EXPORT_SYMBOL(csum_partial);
18871 EXPORT_SYMBOL(empty_zero_page);
18872 +
18873 +#ifdef CONFIG_PAX_KERNEXEC
18874 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
18875 +#endif
18876 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
18877 index f2f8540..d845509 100644
18878 --- a/arch/x86/kernel/i387.c
18879 +++ b/arch/x86/kernel/i387.c
18880 @@ -176,6 +176,9 @@ int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
18881
18882 int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
18883 unsigned int pos, unsigned int count,
18884 + void *kbuf, void __user *ubuf) __size_overflow(4);
18885 +int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
18886 + unsigned int pos, unsigned int count,
18887 void *kbuf, void __user *ubuf)
18888 {
18889 int ret;
18890 @@ -193,6 +196,9 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
18891
18892 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
18893 unsigned int pos, unsigned int count,
18894 + const void *kbuf, const void __user *ubuf) __size_overflow(4);
18895 +int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
18896 + unsigned int pos, unsigned int count,
18897 const void *kbuf, const void __user *ubuf)
18898 {
18899 int ret;
18900 @@ -365,6 +371,9 @@ static void convert_to_fxsr(struct task_struct *tsk,
18901
18902 int fpregs_get(struct task_struct *target, const struct user_regset *regset,
18903 unsigned int pos, unsigned int count,
18904 + void *kbuf, void __user *ubuf) __size_overflow(3,4);
18905 +int fpregs_get(struct task_struct *target, const struct user_regset *regset,
18906 + unsigned int pos, unsigned int count,
18907 void *kbuf, void __user *ubuf)
18908 {
18909 struct user_i387_ia32_struct env;
18910 @@ -395,6 +404,9 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
18911
18912 int fpregs_set(struct task_struct *target, const struct user_regset *regset,
18913 unsigned int pos, unsigned int count,
18914 + const void *kbuf, const void __user *ubuf) __size_overflow(3,4);
18915 +int fpregs_set(struct task_struct *target, const struct user_regset *regset,
18916 + unsigned int pos, unsigned int count,
18917 const void *kbuf, const void __user *ubuf)
18918 {
18919 struct user_i387_ia32_struct env;
18920 @@ -540,6 +552,8 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
18921 }
18922
18923 static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
18924 + unsigned int size) __size_overflow(2);
18925 +static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
18926 unsigned int size)
18927 {
18928 struct task_struct *tsk = current;
18929 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
18930 index df89102..a244320 100644
18931 --- a/arch/x86/kernel/i8259.c
18932 +++ b/arch/x86/kernel/i8259.c
18933 @@ -208,7 +208,7 @@ spurious_8259A_irq:
18934 "spurious 8259A interrupt: IRQ%d.\n", irq);
18935 spurious_irq_mask |= irqmask;
18936 }
18937 - atomic_inc(&irq_err_count);
18938 + atomic_inc_unchecked(&irq_err_count);
18939 /*
18940 * Theoretically we do not have to handle this IRQ,
18941 * but in Linux this does not cause problems and is
18942 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
18943 index 3a54dcb..1c22348 100644
18944 --- a/arch/x86/kernel/init_task.c
18945 +++ b/arch/x86/kernel/init_task.c
18946 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
18947 * way process stacks are handled. This is done by having a special
18948 * "init_task" linker map entry..
18949 */
18950 -union thread_union init_thread_union __init_task_data =
18951 - { INIT_THREAD_INFO(init_task) };
18952 +union thread_union init_thread_union __init_task_data;
18953
18954 /*
18955 * Initial task structure.
18956 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
18957 * section. Since TSS's are completely CPU-local, we want them
18958 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
18959 */
18960 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
18961 -
18962 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
18963 +EXPORT_SYMBOL(init_tss);
18964 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
18965 index 99c4d30..74c84e9 100644
18966 --- a/arch/x86/kernel/ioport.c
18967 +++ b/arch/x86/kernel/ioport.c
18968 @@ -6,6 +6,7 @@
18969 #include <linux/sched.h>
18970 #include <linux/kernel.h>
18971 #include <linux/capability.h>
18972 +#include <linux/security.h>
18973 #include <linux/errno.h>
18974 #include <linux/types.h>
18975 #include <linux/ioport.h>
18976 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18977
18978 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
18979 return -EINVAL;
18980 +#ifdef CONFIG_GRKERNSEC_IO
18981 + if (turn_on && grsec_disable_privio) {
18982 + gr_handle_ioperm();
18983 + return -EPERM;
18984 + }
18985 +#endif
18986 if (turn_on && !capable(CAP_SYS_RAWIO))
18987 return -EPERM;
18988
18989 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18990 * because the ->io_bitmap_max value must match the bitmap
18991 * contents:
18992 */
18993 - tss = &per_cpu(init_tss, get_cpu());
18994 + tss = init_tss + get_cpu();
18995
18996 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
18997
18998 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
18999 return -EINVAL;
19000 /* Trying to gain more privileges? */
19001 if (level > old) {
19002 +#ifdef CONFIG_GRKERNSEC_IO
19003 + if (grsec_disable_privio) {
19004 + gr_handle_iopl();
19005 + return -EPERM;
19006 + }
19007 +#endif
19008 if (!capable(CAP_SYS_RAWIO))
19009 return -EPERM;
19010 }
19011 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
19012 index 04bbd52..83a07d9 100644
19013 --- a/arch/x86/kernel/irq.c
19014 +++ b/arch/x86/kernel/irq.c
19015 @@ -15,7 +15,7 @@
19016 #include <asm/mce.h>
19017 #include <asm/hw_irq.h>
19018
19019 -atomic_t irq_err_count;
19020 +atomic_unchecked_t irq_err_count;
19021
19022 /* Function pointer for generic interrupt vector handling */
19023 void (*generic_interrupt_extension)(void) = NULL;
19024 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
19025 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
19026 seq_printf(p, " Machine check polls\n");
19027 #endif
19028 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
19029 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
19030 #if defined(CONFIG_X86_IO_APIC)
19031 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
19032 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
19033 #endif
19034 return 0;
19035 }
19036 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
19037
19038 u64 arch_irq_stat(void)
19039 {
19040 - u64 sum = atomic_read(&irq_err_count);
19041 + u64 sum = atomic_read_unchecked(&irq_err_count);
19042
19043 #ifdef CONFIG_X86_IO_APIC
19044 - sum += atomic_read(&irq_mis_count);
19045 + sum += atomic_read_unchecked(&irq_mis_count);
19046 #endif
19047 return sum;
19048 }
19049 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
19050 index 7d35d0f..03f1d52 100644
19051 --- a/arch/x86/kernel/irq_32.c
19052 +++ b/arch/x86/kernel/irq_32.c
19053 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
19054 __asm__ __volatile__("andl %%esp,%0" :
19055 "=r" (sp) : "0" (THREAD_SIZE - 1));
19056
19057 - return sp < (sizeof(struct thread_info) + STACK_WARN);
19058 + return sp < STACK_WARN;
19059 }
19060
19061 static void print_stack_overflow(void)
19062 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
19063 * per-CPU IRQ handling contexts (thread information and stack)
19064 */
19065 union irq_ctx {
19066 - struct thread_info tinfo;
19067 - u32 stack[THREAD_SIZE/sizeof(u32)];
19068 -} __attribute__((aligned(PAGE_SIZE)));
19069 + unsigned long previous_esp;
19070 + u32 stack[THREAD_SIZE/sizeof(u32)];
19071 +} __attribute__((aligned(THREAD_SIZE)));
19072
19073 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
19074 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
19075 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
19076 static inline int
19077 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19078 {
19079 - union irq_ctx *curctx, *irqctx;
19080 + union irq_ctx *irqctx;
19081 u32 *isp, arg1, arg2;
19082
19083 - curctx = (union irq_ctx *) current_thread_info();
19084 irqctx = __get_cpu_var(hardirq_ctx);
19085
19086 /*
19087 @@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19088 * handler) we can't do that and just have to keep using the
19089 * current stack (which is the irq stack already after all)
19090 */
19091 - if (unlikely(curctx == irqctx))
19092 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
19093 return 0;
19094
19095 /* build the stack frame on the IRQ stack */
19096 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
19097 - irqctx->tinfo.task = curctx->tinfo.task;
19098 - irqctx->tinfo.previous_esp = current_stack_pointer;
19099 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
19100 + irqctx->previous_esp = current_stack_pointer;
19101
19102 - /*
19103 - * Copy the softirq bits in preempt_count so that the
19104 - * softirq checks work in the hardirq context.
19105 - */
19106 - irqctx->tinfo.preempt_count =
19107 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
19108 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
19109 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19110 + __set_fs(MAKE_MM_SEG(0));
19111 +#endif
19112
19113 if (unlikely(overflow))
19114 call_on_stack(print_stack_overflow, isp);
19115 @@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19116 : "0" (irq), "1" (desc), "2" (isp),
19117 "D" (desc->handle_irq)
19118 : "memory", "cc", "ecx");
19119 +
19120 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19121 + __set_fs(current_thread_info()->addr_limit);
19122 +#endif
19123 +
19124 return 1;
19125 }
19126
19127 @@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19128 */
19129 void __cpuinit irq_ctx_init(int cpu)
19130 {
19131 - union irq_ctx *irqctx;
19132 -
19133 if (per_cpu(hardirq_ctx, cpu))
19134 return;
19135
19136 - irqctx = &per_cpu(hardirq_stack, cpu);
19137 - irqctx->tinfo.task = NULL;
19138 - irqctx->tinfo.exec_domain = NULL;
19139 - irqctx->tinfo.cpu = cpu;
19140 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
19141 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
19142 -
19143 - per_cpu(hardirq_ctx, cpu) = irqctx;
19144 -
19145 - irqctx = &per_cpu(softirq_stack, cpu);
19146 - irqctx->tinfo.task = NULL;
19147 - irqctx->tinfo.exec_domain = NULL;
19148 - irqctx->tinfo.cpu = cpu;
19149 - irqctx->tinfo.preempt_count = 0;
19150 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
19151 -
19152 - per_cpu(softirq_ctx, cpu) = irqctx;
19153 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
19154 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
19155
19156 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
19157 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
19158 @@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
19159 asmlinkage void do_softirq(void)
19160 {
19161 unsigned long flags;
19162 - struct thread_info *curctx;
19163 union irq_ctx *irqctx;
19164 u32 *isp;
19165
19166 @@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
19167 local_irq_save(flags);
19168
19169 if (local_softirq_pending()) {
19170 - curctx = current_thread_info();
19171 irqctx = __get_cpu_var(softirq_ctx);
19172 - irqctx->tinfo.task = curctx->task;
19173 - irqctx->tinfo.previous_esp = current_stack_pointer;
19174 + irqctx->previous_esp = current_stack_pointer;
19175
19176 /* build the stack frame on the softirq stack */
19177 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
19178 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
19179 +
19180 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19181 + __set_fs(MAKE_MM_SEG(0));
19182 +#endif
19183
19184 call_on_stack(__do_softirq, isp);
19185 +
19186 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19187 + __set_fs(current_thread_info()->addr_limit);
19188 +#endif
19189 +
19190 /*
19191 * Shouldnt happen, we returned above if in_interrupt():
19192 */
19193 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
19194 index 8d82a77..0baf312 100644
19195 --- a/arch/x86/kernel/kgdb.c
19196 +++ b/arch/x86/kernel/kgdb.c
19197 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
19198
19199 /* clear the trace bit */
19200 linux_regs->flags &= ~X86_EFLAGS_TF;
19201 - atomic_set(&kgdb_cpu_doing_single_step, -1);
19202 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
19203
19204 /* set the trace bit if we're stepping */
19205 if (remcomInBuffer[0] == 's') {
19206 linux_regs->flags |= X86_EFLAGS_TF;
19207 kgdb_single_step = 1;
19208 - atomic_set(&kgdb_cpu_doing_single_step,
19209 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
19210 raw_smp_processor_id());
19211 }
19212
19213 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
19214 break;
19215
19216 case DIE_DEBUG:
19217 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
19218 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
19219 raw_smp_processor_id()) {
19220 if (user_mode(regs))
19221 return single_step_cont(regs, args);
19222 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
19223 return instruction_pointer(regs);
19224 }
19225
19226 -struct kgdb_arch arch_kgdb_ops = {
19227 +const struct kgdb_arch arch_kgdb_ops = {
19228 /* Breakpoint instruction: */
19229 .gdb_bpt_instr = { 0xcc },
19230 .flags = KGDB_HW_BREAKPOINT,
19231 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
19232 index 7a67820..70ea187 100644
19233 --- a/arch/x86/kernel/kprobes.c
19234 +++ b/arch/x86/kernel/kprobes.c
19235 @@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
19236 char op;
19237 s32 raddr;
19238 } __attribute__((packed)) * jop;
19239 - jop = (struct __arch_jmp_op *)from;
19240 +
19241 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
19242 +
19243 + pax_open_kernel();
19244 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
19245 jop->op = RELATIVEJUMP_INSTRUCTION;
19246 + pax_close_kernel();
19247 }
19248
19249 /*
19250 @@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
19251 kprobe_opcode_t opcode;
19252 kprobe_opcode_t *orig_opcodes = opcodes;
19253
19254 - if (search_exception_tables((unsigned long)opcodes))
19255 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
19256 return 0; /* Page fault may occur on this address. */
19257
19258 retry:
19259 @@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
19260 disp = (u8 *) p->addr + *((s32 *) insn) -
19261 (u8 *) p->ainsn.insn;
19262 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
19263 + pax_open_kernel();
19264 *(s32 *)insn = (s32) disp;
19265 + pax_close_kernel();
19266 }
19267 }
19268 #endif
19269 @@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
19270
19271 static void __kprobes arch_copy_kprobe(struct kprobe *p)
19272 {
19273 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
19274 + pax_open_kernel();
19275 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
19276 + pax_close_kernel();
19277
19278 fix_riprel(p);
19279
19280 - if (can_boost(p->addr))
19281 + if (can_boost(ktla_ktva(p->addr)))
19282 p->ainsn.boostable = 0;
19283 else
19284 p->ainsn.boostable = -1;
19285
19286 - p->opcode = *p->addr;
19287 + p->opcode = *(ktla_ktva(p->addr));
19288 }
19289
19290 int __kprobes arch_prepare_kprobe(struct kprobe *p)
19291 @@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
19292 if (p->opcode == BREAKPOINT_INSTRUCTION)
19293 regs->ip = (unsigned long)p->addr;
19294 else
19295 - regs->ip = (unsigned long)p->ainsn.insn;
19296 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19297 }
19298
19299 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
19300 @@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
19301 if (p->ainsn.boostable == 1 && !p->post_handler) {
19302 /* Boost up -- we can execute copied instructions directly */
19303 reset_current_kprobe();
19304 - regs->ip = (unsigned long)p->ainsn.insn;
19305 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19306 preempt_enable_no_resched();
19307 return;
19308 }
19309 @@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
19310 struct kprobe_ctlblk *kcb;
19311
19312 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
19313 - if (*addr != BREAKPOINT_INSTRUCTION) {
19314 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
19315 /*
19316 * The breakpoint instruction was removed right
19317 * after we hit it. Another cpu has removed
19318 @@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
19319 /* Skip orig_ax, ip, cs */
19320 " addq $24, %rsp\n"
19321 " popfq\n"
19322 +#ifdef KERNEXEC_PLUGIN
19323 + " btsq $63,(%rsp)\n"
19324 +#endif
19325 #else
19326 " pushf\n"
19327 /*
19328 @@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
19329 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
19330 {
19331 unsigned long *tos = stack_addr(regs);
19332 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
19333 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
19334 unsigned long orig_ip = (unsigned long)p->addr;
19335 kprobe_opcode_t *insn = p->ainsn.insn;
19336
19337 @@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
19338 struct die_args *args = data;
19339 int ret = NOTIFY_DONE;
19340
19341 - if (args->regs && user_mode_vm(args->regs))
19342 + if (args->regs && user_mode(args->regs))
19343 return ret;
19344
19345 switch (val) {
19346 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
19347 index 63b0ec8..6d92227 100644
19348 --- a/arch/x86/kernel/kvm.c
19349 +++ b/arch/x86/kernel/kvm.c
19350 @@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
19351 pv_mmu_ops.set_pud = kvm_set_pud;
19352 #if PAGETABLE_LEVELS == 4
19353 pv_mmu_ops.set_pgd = kvm_set_pgd;
19354 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
19355 #endif
19356 #endif
19357 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
19358 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
19359 index ec6ef60..d784780 100644
19360 --- a/arch/x86/kernel/ldt.c
19361 +++ b/arch/x86/kernel/ldt.c
19362 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
19363 if (reload) {
19364 #ifdef CONFIG_SMP
19365 preempt_disable();
19366 - load_LDT(pc);
19367 + load_LDT_nolock(pc);
19368 if (!cpumask_equal(mm_cpumask(current->mm),
19369 cpumask_of(smp_processor_id())))
19370 smp_call_function(flush_ldt, current->mm, 1);
19371 preempt_enable();
19372 #else
19373 - load_LDT(pc);
19374 + load_LDT_nolock(pc);
19375 #endif
19376 }
19377 if (oldsize) {
19378 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
19379 return err;
19380
19381 for (i = 0; i < old->size; i++)
19382 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
19383 + write_ldt_entry(new->ldt, i, old->ldt + i);
19384 return 0;
19385 }
19386
19387 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
19388 retval = copy_ldt(&mm->context, &old_mm->context);
19389 mutex_unlock(&old_mm->context.lock);
19390 }
19391 +
19392 + if (tsk == current) {
19393 + mm->context.vdso = 0;
19394 +
19395 +#ifdef CONFIG_X86_32
19396 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19397 + mm->context.user_cs_base = 0UL;
19398 + mm->context.user_cs_limit = ~0UL;
19399 +
19400 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
19401 + cpus_clear(mm->context.cpu_user_cs_mask);
19402 +#endif
19403 +
19404 +#endif
19405 +#endif
19406 +
19407 + }
19408 +
19409 return retval;
19410 }
19411
19412 @@ -140,6 +158,7 @@ void destroy_context(struct mm_struct *mm)
19413 }
19414 }
19415
19416 +static int read_ldt(void __user *ptr, unsigned long bytecount) __size_overflow(2);
19417 static int read_ldt(void __user *ptr, unsigned long bytecount)
19418 {
19419 int err;
19420 @@ -229,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
19421 }
19422 }
19423
19424 +#ifdef CONFIG_PAX_SEGMEXEC
19425 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
19426 + error = -EINVAL;
19427 + goto out_unlock;
19428 + }
19429 +#endif
19430 +
19431 fill_ldt(&ldt, &ldt_info);
19432 if (oldmode)
19433 ldt.avl = 0;
19434 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
19435 index c1c429d..f02eaf9 100644
19436 --- a/arch/x86/kernel/machine_kexec_32.c
19437 +++ b/arch/x86/kernel/machine_kexec_32.c
19438 @@ -26,7 +26,7 @@
19439 #include <asm/system.h>
19440 #include <asm/cacheflush.h>
19441
19442 -static void set_idt(void *newidt, __u16 limit)
19443 +static void set_idt(struct desc_struct *newidt, __u16 limit)
19444 {
19445 struct desc_ptr curidt;
19446
19447 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
19448 }
19449
19450
19451 -static void set_gdt(void *newgdt, __u16 limit)
19452 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
19453 {
19454 struct desc_ptr curgdt;
19455
19456 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
19457 }
19458
19459 control_page = page_address(image->control_code_page);
19460 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
19461 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
19462
19463 relocate_kernel_ptr = control_page;
19464 page_list[PA_CONTROL_PAGE] = __pa(control_page);
19465 diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
19466 index 1e47679..e73449d 100644
19467 --- a/arch/x86/kernel/microcode_amd.c
19468 +++ b/arch/x86/kernel/microcode_amd.c
19469 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
19470 uci->mc = NULL;
19471 }
19472
19473 -static struct microcode_ops microcode_amd_ops = {
19474 +static const struct microcode_ops microcode_amd_ops = {
19475 .request_microcode_user = request_microcode_user,
19476 .request_microcode_fw = request_microcode_fw,
19477 .collect_cpu_info = collect_cpu_info_amd,
19478 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
19479 .microcode_fini_cpu = microcode_fini_cpu_amd,
19480 };
19481
19482 -struct microcode_ops * __init init_amd_microcode(void)
19483 +const struct microcode_ops * __init init_amd_microcode(void)
19484 {
19485 return &microcode_amd_ops;
19486 }
19487 diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
19488 index 378e9a8..b5a6ea9 100644
19489 --- a/arch/x86/kernel/microcode_core.c
19490 +++ b/arch/x86/kernel/microcode_core.c
19491 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
19492
19493 #define MICROCODE_VERSION "2.00"
19494
19495 -static struct microcode_ops *microcode_ops;
19496 +static const struct microcode_ops *microcode_ops;
19497
19498 /*
19499 * Synchronization.
19500 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
19501 index 0d334dd..5a709b5 100644
19502 --- a/arch/x86/kernel/microcode_intel.c
19503 +++ b/arch/x86/kernel/microcode_intel.c
19504 @@ -441,15 +441,16 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
19505 return ret;
19506 }
19507
19508 +static int get_ucode_user(void *to, const void *from, size_t n) __size_overflow(3);
19509 static int get_ucode_user(void *to, const void *from, size_t n)
19510 {
19511 - return copy_from_user(to, from, n);
19512 + return copy_from_user(to, (const void __force_user *)from, n);
19513 }
19514
19515 static enum ucode_state
19516 request_microcode_user(int cpu, const void __user *buf, size_t size)
19517 {
19518 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
19519 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
19520 }
19521
19522 static void microcode_fini_cpu(int cpu)
19523 @@ -460,7 +461,7 @@ static void microcode_fini_cpu(int cpu)
19524 uci->mc = NULL;
19525 }
19526
19527 -static struct microcode_ops microcode_intel_ops = {
19528 +static const struct microcode_ops microcode_intel_ops = {
19529 .request_microcode_user = request_microcode_user,
19530 .request_microcode_fw = request_microcode_fw,
19531 .collect_cpu_info = collect_cpu_info,
19532 @@ -468,7 +469,7 @@ static struct microcode_ops microcode_intel_ops = {
19533 .microcode_fini_cpu = microcode_fini_cpu,
19534 };
19535
19536 -struct microcode_ops * __init init_intel_microcode(void)
19537 +const struct microcode_ops * __init init_intel_microcode(void)
19538 {
19539 return &microcode_intel_ops;
19540 }
19541 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
19542 index 89f386f..9028f51 100644
19543 --- a/arch/x86/kernel/module.c
19544 +++ b/arch/x86/kernel/module.c
19545 @@ -34,7 +34,7 @@
19546 #define DEBUGP(fmt...)
19547 #endif
19548
19549 -void *module_alloc(unsigned long size)
19550 +static void *__module_alloc(unsigned long size, pgprot_t prot)
19551 {
19552 struct vm_struct *area;
19553
19554 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
19555 if (!area)
19556 return NULL;
19557
19558 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
19559 - PAGE_KERNEL_EXEC);
19560 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
19561 +}
19562 +
19563 +void *module_alloc(unsigned long size)
19564 +{
19565 +
19566 +#ifdef CONFIG_PAX_KERNEXEC
19567 + return __module_alloc(size, PAGE_KERNEL);
19568 +#else
19569 + return __module_alloc(size, PAGE_KERNEL_EXEC);
19570 +#endif
19571 +
19572 }
19573
19574 /* Free memory returned from module_alloc */
19575 @@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
19576 vfree(module_region);
19577 }
19578
19579 +#ifdef CONFIG_PAX_KERNEXEC
19580 +#ifdef CONFIG_X86_32
19581 +void *module_alloc_exec(unsigned long size)
19582 +{
19583 + struct vm_struct *area;
19584 +
19585 + if (size == 0)
19586 + return NULL;
19587 +
19588 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
19589 + return area ? area->addr : NULL;
19590 +}
19591 +EXPORT_SYMBOL(module_alloc_exec);
19592 +
19593 +void module_free_exec(struct module *mod, void *module_region)
19594 +{
19595 + vunmap(module_region);
19596 +}
19597 +EXPORT_SYMBOL(module_free_exec);
19598 +#else
19599 +void module_free_exec(struct module *mod, void *module_region)
19600 +{
19601 + module_free(mod, module_region);
19602 +}
19603 +EXPORT_SYMBOL(module_free_exec);
19604 +
19605 +void *module_alloc_exec(unsigned long size)
19606 +{
19607 + return __module_alloc(size, PAGE_KERNEL_RX);
19608 +}
19609 +EXPORT_SYMBOL(module_alloc_exec);
19610 +#endif
19611 +#endif
19612 +
19613 /* We don't need anything special. */
19614 int module_frob_arch_sections(Elf_Ehdr *hdr,
19615 Elf_Shdr *sechdrs,
19616 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19617 unsigned int i;
19618 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
19619 Elf32_Sym *sym;
19620 - uint32_t *location;
19621 + uint32_t *plocation, location;
19622
19623 DEBUGP("Applying relocate section %u to %u\n", relsec,
19624 sechdrs[relsec].sh_info);
19625 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
19626 /* This is where to make the change */
19627 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
19628 - + rel[i].r_offset;
19629 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
19630 + location = (uint32_t)plocation;
19631 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
19632 + plocation = ktla_ktva((void *)plocation);
19633 /* This is the symbol it is referring to. Note that all
19634 undefined symbols have been resolved. */
19635 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
19636 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19637 switch (ELF32_R_TYPE(rel[i].r_info)) {
19638 case R_386_32:
19639 /* We add the value into the location given */
19640 - *location += sym->st_value;
19641 + pax_open_kernel();
19642 + *plocation += sym->st_value;
19643 + pax_close_kernel();
19644 break;
19645 case R_386_PC32:
19646 /* Add the value, subtract its postition */
19647 - *location += sym->st_value - (uint32_t)location;
19648 + pax_open_kernel();
19649 + *plocation += sym->st_value - location;
19650 + pax_close_kernel();
19651 break;
19652 default:
19653 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
19654 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
19655 case R_X86_64_NONE:
19656 break;
19657 case R_X86_64_64:
19658 + pax_open_kernel();
19659 *(u64 *)loc = val;
19660 + pax_close_kernel();
19661 break;
19662 case R_X86_64_32:
19663 + pax_open_kernel();
19664 *(u32 *)loc = val;
19665 + pax_close_kernel();
19666 if (val != *(u32 *)loc)
19667 goto overflow;
19668 break;
19669 case R_X86_64_32S:
19670 + pax_open_kernel();
19671 *(s32 *)loc = val;
19672 + pax_close_kernel();
19673 if ((s64)val != *(s32 *)loc)
19674 goto overflow;
19675 break;
19676 case R_X86_64_PC32:
19677 val -= (u64)loc;
19678 + pax_open_kernel();
19679 *(u32 *)loc = val;
19680 + pax_close_kernel();
19681 +
19682 #if 0
19683 if ((s64)val != *(s32 *)loc)
19684 goto overflow;
19685 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
19686 index 3a7c5a4..9191528 100644
19687 --- a/arch/x86/kernel/paravirt-spinlocks.c
19688 +++ b/arch/x86/kernel/paravirt-spinlocks.c
19689 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
19690 __raw_spin_lock(lock);
19691 }
19692
19693 -struct pv_lock_ops pv_lock_ops = {
19694 +struct pv_lock_ops pv_lock_ops __read_only = {
19695 #ifdef CONFIG_SMP
19696 .spin_is_locked = __ticket_spin_is_locked,
19697 .spin_is_contended = __ticket_spin_is_contended,
19698 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
19699 index 1b1739d..dea6077 100644
19700 --- a/arch/x86/kernel/paravirt.c
19701 +++ b/arch/x86/kernel/paravirt.c
19702 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
19703 {
19704 return x;
19705 }
19706 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19707 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
19708 +#endif
19709
19710 void __init default_banner(void)
19711 {
19712 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
19713 * corresponding structure. */
19714 static void *get_call_destination(u8 type)
19715 {
19716 - struct paravirt_patch_template tmpl = {
19717 + const struct paravirt_patch_template tmpl = {
19718 .pv_init_ops = pv_init_ops,
19719 .pv_time_ops = pv_time_ops,
19720 .pv_cpu_ops = pv_cpu_ops,
19721 @@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
19722 .pv_lock_ops = pv_lock_ops,
19723 #endif
19724 };
19725 +
19726 + pax_track_stack();
19727 return *((void **)&tmpl + type);
19728 }
19729
19730 @@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
19731 if (opfunc == NULL)
19732 /* If there's no function, patch it with a ud2a (BUG) */
19733 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
19734 - else if (opfunc == _paravirt_nop)
19735 + else if (opfunc == (void *)_paravirt_nop)
19736 /* If the operation is a nop, then nop the callsite */
19737 ret = paravirt_patch_nop();
19738
19739 /* identity functions just return their single argument */
19740 - else if (opfunc == _paravirt_ident_32)
19741 + else if (opfunc == (void *)_paravirt_ident_32)
19742 ret = paravirt_patch_ident_32(insnbuf, len);
19743 - else if (opfunc == _paravirt_ident_64)
19744 + else if (opfunc == (void *)_paravirt_ident_64)
19745 ret = paravirt_patch_ident_64(insnbuf, len);
19746 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19747 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
19748 + ret = paravirt_patch_ident_64(insnbuf, len);
19749 +#endif
19750
19751 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
19752 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
19753 @@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
19754 if (insn_len > len || start == NULL)
19755 insn_len = len;
19756 else
19757 - memcpy(insnbuf, start, insn_len);
19758 + memcpy(insnbuf, ktla_ktva(start), insn_len);
19759
19760 return insn_len;
19761 }
19762 @@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
19763 preempt_enable();
19764 }
19765
19766 -struct pv_info pv_info = {
19767 +struct pv_info pv_info __read_only = {
19768 .name = "bare hardware",
19769 .paravirt_enabled = 0,
19770 .kernel_rpl = 0,
19771 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
19772 };
19773
19774 -struct pv_init_ops pv_init_ops = {
19775 +struct pv_init_ops pv_init_ops __read_only = {
19776 .patch = native_patch,
19777 };
19778
19779 -struct pv_time_ops pv_time_ops = {
19780 +struct pv_time_ops pv_time_ops __read_only = {
19781 .sched_clock = native_sched_clock,
19782 };
19783
19784 -struct pv_irq_ops pv_irq_ops = {
19785 +struct pv_irq_ops pv_irq_ops __read_only = {
19786 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
19787 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
19788 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
19789 @@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
19790 #endif
19791 };
19792
19793 -struct pv_cpu_ops pv_cpu_ops = {
19794 +struct pv_cpu_ops pv_cpu_ops __read_only = {
19795 .cpuid = native_cpuid,
19796 .get_debugreg = native_get_debugreg,
19797 .set_debugreg = native_set_debugreg,
19798 @@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
19799 .end_context_switch = paravirt_nop,
19800 };
19801
19802 -struct pv_apic_ops pv_apic_ops = {
19803 +struct pv_apic_ops pv_apic_ops __read_only = {
19804 #ifdef CONFIG_X86_LOCAL_APIC
19805 .startup_ipi_hook = paravirt_nop,
19806 #endif
19807 };
19808
19809 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
19810 +#ifdef CONFIG_X86_32
19811 +#ifdef CONFIG_X86_PAE
19812 +/* 64-bit pagetable entries */
19813 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
19814 +#else
19815 /* 32-bit pagetable entries */
19816 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
19817 +#endif
19818 #else
19819 /* 64-bit pagetable entries */
19820 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
19821 #endif
19822
19823 -struct pv_mmu_ops pv_mmu_ops = {
19824 +struct pv_mmu_ops pv_mmu_ops __read_only = {
19825
19826 .read_cr2 = native_read_cr2,
19827 .write_cr2 = native_write_cr2,
19828 @@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
19829 .make_pud = PTE_IDENT,
19830
19831 .set_pgd = native_set_pgd,
19832 + .set_pgd_batched = native_set_pgd_batched,
19833 #endif
19834 #endif /* PAGETABLE_LEVELS >= 3 */
19835
19836 @@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
19837 },
19838
19839 .set_fixmap = native_set_fixmap,
19840 +
19841 +#ifdef CONFIG_PAX_KERNEXEC
19842 + .pax_open_kernel = native_pax_open_kernel,
19843 + .pax_close_kernel = native_pax_close_kernel,
19844 +#endif
19845 +
19846 };
19847
19848 EXPORT_SYMBOL_GPL(pv_time_ops);
19849 diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
19850 index 1a2d4b1..6a0dd55 100644
19851 --- a/arch/x86/kernel/pci-calgary_64.c
19852 +++ b/arch/x86/kernel/pci-calgary_64.c
19853 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
19854 free_pages((unsigned long)vaddr, get_order(size));
19855 }
19856
19857 -static struct dma_map_ops calgary_dma_ops = {
19858 +static const struct dma_map_ops calgary_dma_ops = {
19859 .alloc_coherent = calgary_alloc_coherent,
19860 .free_coherent = calgary_free_coherent,
19861 .map_sg = calgary_map_sg,
19862 diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
19863 index 6ac3931..42b4414 100644
19864 --- a/arch/x86/kernel/pci-dma.c
19865 +++ b/arch/x86/kernel/pci-dma.c
19866 @@ -14,7 +14,7 @@
19867
19868 static int forbid_dac __read_mostly;
19869
19870 -struct dma_map_ops *dma_ops;
19871 +const struct dma_map_ops *dma_ops;
19872 EXPORT_SYMBOL(dma_ops);
19873
19874 static int iommu_sac_force __read_mostly;
19875 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
19876
19877 int dma_supported(struct device *dev, u64 mask)
19878 {
19879 - struct dma_map_ops *ops = get_dma_ops(dev);
19880 + const struct dma_map_ops *ops = get_dma_ops(dev);
19881
19882 #ifdef CONFIG_PCI
19883 if (mask > 0xffffffff && forbid_dac > 0) {
19884 diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
19885 index 1c76691..e3632db 100644
19886 --- a/arch/x86/kernel/pci-gart_64.c
19887 +++ b/arch/x86/kernel/pci-gart_64.c
19888 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
19889 return -1;
19890 }
19891
19892 -static struct dma_map_ops gart_dma_ops = {
19893 +static const struct dma_map_ops gart_dma_ops = {
19894 .map_sg = gart_map_sg,
19895 .unmap_sg = gart_unmap_sg,
19896 .map_page = gart_map_page,
19897 diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
19898 index a3933d4..c898869 100644
19899 --- a/arch/x86/kernel/pci-nommu.c
19900 +++ b/arch/x86/kernel/pci-nommu.c
19901 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
19902 flush_write_buffers();
19903 }
19904
19905 -struct dma_map_ops nommu_dma_ops = {
19906 +const struct dma_map_ops nommu_dma_ops = {
19907 .alloc_coherent = dma_generic_alloc_coherent,
19908 .free_coherent = nommu_free_coherent,
19909 .map_sg = nommu_map_sg,
19910 diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
19911 index aaa6b78..4de1881 100644
19912 --- a/arch/x86/kernel/pci-swiotlb.c
19913 +++ b/arch/x86/kernel/pci-swiotlb.c
19914 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
19915 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
19916 }
19917
19918 -static struct dma_map_ops swiotlb_dma_ops = {
19919 +static const struct dma_map_ops swiotlb_dma_ops = {
19920 .mapping_error = swiotlb_dma_mapping_error,
19921 .alloc_coherent = x86_swiotlb_alloc_coherent,
19922 .free_coherent = swiotlb_free_coherent,
19923 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
19924 index fc6c84d..0312ca2 100644
19925 --- a/arch/x86/kernel/process.c
19926 +++ b/arch/x86/kernel/process.c
19927 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
19928
19929 void free_thread_info(struct thread_info *ti)
19930 {
19931 - free_thread_xstate(ti->task);
19932 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
19933 }
19934
19935 +static struct kmem_cache *task_struct_cachep;
19936 +
19937 void arch_task_cache_init(void)
19938 {
19939 - task_xstate_cachep =
19940 - kmem_cache_create("task_xstate", xstate_size,
19941 + /* create a slab on which task_structs can be allocated */
19942 + task_struct_cachep =
19943 + kmem_cache_create("task_struct", sizeof(struct task_struct),
19944 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
19945 +
19946 + task_xstate_cachep =
19947 + kmem_cache_create("task_xstate", xstate_size,
19948 __alignof__(union thread_xstate),
19949 - SLAB_PANIC | SLAB_NOTRACK, NULL);
19950 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
19951 +}
19952 +
19953 +struct task_struct *alloc_task_struct(void)
19954 +{
19955 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
19956 +}
19957 +
19958 +void free_task_struct(struct task_struct *task)
19959 +{
19960 + free_thread_xstate(task);
19961 + kmem_cache_free(task_struct_cachep, task);
19962 }
19963
19964 /*
19965 @@ -73,7 +90,7 @@ void exit_thread(void)
19966 unsigned long *bp = t->io_bitmap_ptr;
19967
19968 if (bp) {
19969 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
19970 + struct tss_struct *tss = init_tss + get_cpu();
19971
19972 t->io_bitmap_ptr = NULL;
19973 clear_thread_flag(TIF_IO_BITMAP);
19974 @@ -93,6 +110,9 @@ void flush_thread(void)
19975
19976 clear_tsk_thread_flag(tsk, TIF_DEBUG);
19977
19978 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19979 + loadsegment(gs, 0);
19980 +#endif
19981 tsk->thread.debugreg0 = 0;
19982 tsk->thread.debugreg1 = 0;
19983 tsk->thread.debugreg2 = 0;
19984 @@ -307,7 +327,7 @@ void default_idle(void)
19985 EXPORT_SYMBOL(default_idle);
19986 #endif
19987
19988 -void stop_this_cpu(void *dummy)
19989 +__noreturn void stop_this_cpu(void *dummy)
19990 {
19991 local_irq_disable();
19992 /*
19993 @@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
19994 }
19995 early_param("idle", idle_setup);
19996
19997 -unsigned long arch_align_stack(unsigned long sp)
19998 +#ifdef CONFIG_PAX_RANDKSTACK
19999 +void pax_randomize_kstack(struct pt_regs *regs)
20000 {
20001 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
20002 - sp -= get_random_int() % 8192;
20003 - return sp & ~0xf;
20004 -}
20005 + struct thread_struct *thread = &current->thread;
20006 + unsigned long time;
20007
20008 -unsigned long arch_randomize_brk(struct mm_struct *mm)
20009 -{
20010 - unsigned long range_end = mm->brk + 0x02000000;
20011 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
20012 + if (!randomize_va_space)
20013 + return;
20014 +
20015 + if (v8086_mode(regs))
20016 + return;
20017 +
20018 + rdtscl(time);
20019 +
20020 + /* P4 seems to return a 0 LSB, ignore it */
20021 +#ifdef CONFIG_MPENTIUM4
20022 + time &= 0x3EUL;
20023 + time <<= 2;
20024 +#elif defined(CONFIG_X86_64)
20025 + time &= 0xFUL;
20026 + time <<= 4;
20027 +#else
20028 + time &= 0x1FUL;
20029 + time <<= 3;
20030 +#endif
20031 +
20032 + thread->sp0 ^= time;
20033 + load_sp0(init_tss + smp_processor_id(), thread);
20034 +
20035 +#ifdef CONFIG_X86_64
20036 + percpu_write(kernel_stack, thread->sp0);
20037 +#endif
20038 }
20039 +#endif
20040
20041 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
20042 index c40c432..6e1df72 100644
20043 --- a/arch/x86/kernel/process_32.c
20044 +++ b/arch/x86/kernel/process_32.c
20045 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
20046 unsigned long thread_saved_pc(struct task_struct *tsk)
20047 {
20048 return ((unsigned long *)tsk->thread.sp)[3];
20049 +//XXX return tsk->thread.eip;
20050 }
20051
20052 #ifndef CONFIG_SMP
20053 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
20054 unsigned short ss, gs;
20055 const char *board;
20056
20057 - if (user_mode_vm(regs)) {
20058 + if (user_mode(regs)) {
20059 sp = regs->sp;
20060 ss = regs->ss & 0xffff;
20061 - gs = get_user_gs(regs);
20062 } else {
20063 sp = (unsigned long) (&regs->sp);
20064 savesegment(ss, ss);
20065 - savesegment(gs, gs);
20066 }
20067 + gs = get_user_gs(regs);
20068
20069 printk("\n");
20070
20071 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
20072 regs.bx = (unsigned long) fn;
20073 regs.dx = (unsigned long) arg;
20074
20075 - regs.ds = __USER_DS;
20076 - regs.es = __USER_DS;
20077 + regs.ds = __KERNEL_DS;
20078 + regs.es = __KERNEL_DS;
20079 regs.fs = __KERNEL_PERCPU;
20080 - regs.gs = __KERNEL_STACK_CANARY;
20081 + savesegment(gs, regs.gs);
20082 regs.orig_ax = -1;
20083 regs.ip = (unsigned long) kernel_thread_helper;
20084 regs.cs = __KERNEL_CS | get_kernel_rpl();
20085 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
20086 struct task_struct *tsk;
20087 int err;
20088
20089 - childregs = task_pt_regs(p);
20090 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
20091 *childregs = *regs;
20092 childregs->ax = 0;
20093 childregs->sp = sp;
20094
20095 p->thread.sp = (unsigned long) childregs;
20096 p->thread.sp0 = (unsigned long) (childregs+1);
20097 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
20098
20099 p->thread.ip = (unsigned long) ret_from_fork;
20100
20101 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20102 struct thread_struct *prev = &prev_p->thread,
20103 *next = &next_p->thread;
20104 int cpu = smp_processor_id();
20105 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
20106 + struct tss_struct *tss = init_tss + cpu;
20107 bool preload_fpu;
20108
20109 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
20110 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20111 */
20112 lazy_save_gs(prev->gs);
20113
20114 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20115 + __set_fs(task_thread_info(next_p)->addr_limit);
20116 +#endif
20117 +
20118 /*
20119 * Load the per-thread Thread-Local Storage descriptor.
20120 */
20121 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20122 */
20123 arch_end_context_switch(next_p);
20124
20125 + percpu_write(current_task, next_p);
20126 + percpu_write(current_tinfo, &next_p->tinfo);
20127 +
20128 if (preload_fpu)
20129 __math_state_restore();
20130
20131 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20132 if (prev->gs | next->gs)
20133 lazy_load_gs(next->gs);
20134
20135 - percpu_write(current_task, next_p);
20136 -
20137 return prev_p;
20138 }
20139
20140 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
20141 } while (count++ < 16);
20142 return 0;
20143 }
20144 -
20145 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
20146 index 39493bc..196816d 100644
20147 --- a/arch/x86/kernel/process_64.c
20148 +++ b/arch/x86/kernel/process_64.c
20149 @@ -91,7 +91,7 @@ static void __exit_idle(void)
20150 void exit_idle(void)
20151 {
20152 /* idle loop has pid 0 */
20153 - if (current->pid)
20154 + if (task_pid_nr(current))
20155 return;
20156 __exit_idle();
20157 }
20158 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
20159 if (!board)
20160 board = "";
20161 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
20162 - current->pid, current->comm, print_tainted(),
20163 + task_pid_nr(current), current->comm, print_tainted(),
20164 init_utsname()->release,
20165 (int)strcspn(init_utsname()->version, " "),
20166 init_utsname()->version, board);
20167 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
20168 struct pt_regs *childregs;
20169 struct task_struct *me = current;
20170
20171 - childregs = ((struct pt_regs *)
20172 - (THREAD_SIZE + task_stack_page(p))) - 1;
20173 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
20174 *childregs = *regs;
20175
20176 childregs->ax = 0;
20177 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
20178 p->thread.sp = (unsigned long) childregs;
20179 p->thread.sp0 = (unsigned long) (childregs+1);
20180 p->thread.usersp = me->thread.usersp;
20181 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
20182
20183 set_tsk_thread_flag(p, TIF_FORK);
20184
20185 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20186 struct thread_struct *prev = &prev_p->thread;
20187 struct thread_struct *next = &next_p->thread;
20188 int cpu = smp_processor_id();
20189 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
20190 + struct tss_struct *tss = init_tss + cpu;
20191 unsigned fsindex, gsindex;
20192 bool preload_fpu;
20193
20194 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20195 prev->usersp = percpu_read(old_rsp);
20196 percpu_write(old_rsp, next->usersp);
20197 percpu_write(current_task, next_p);
20198 + percpu_write(current_tinfo, &next_p->tinfo);
20199
20200 - percpu_write(kernel_stack,
20201 - (unsigned long)task_stack_page(next_p) +
20202 - THREAD_SIZE - KERNEL_STACK_OFFSET);
20203 + percpu_write(kernel_stack, next->sp0);
20204
20205 /*
20206 * Now maybe reload the debug registers and handle I/O bitmaps
20207 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
20208 if (!p || p == current || p->state == TASK_RUNNING)
20209 return 0;
20210 stack = (unsigned long)task_stack_page(p);
20211 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
20212 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
20213 return 0;
20214 fp = *(u64 *)(p->thread.sp);
20215 do {
20216 - if (fp < (unsigned long)stack ||
20217 - fp >= (unsigned long)stack+THREAD_SIZE)
20218 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
20219 return 0;
20220 ip = *(u64 *)(fp+8);
20221 if (!in_sched_functions(ip))
20222 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
20223 index c06acdd..09de221 100644
20224 --- a/arch/x86/kernel/ptrace.c
20225 +++ b/arch/x86/kernel/ptrace.c
20226 @@ -559,6 +559,10 @@ static int ioperm_active(struct task_struct *target,
20227 static int ioperm_get(struct task_struct *target,
20228 const struct user_regset *regset,
20229 unsigned int pos, unsigned int count,
20230 + void *kbuf, void __user *ubuf) __size_overflow(3,4);
20231 +static int ioperm_get(struct task_struct *target,
20232 + const struct user_regset *regset,
20233 + unsigned int pos, unsigned int count,
20234 void *kbuf, void __user *ubuf)
20235 {
20236 if (!target->thread.io_bitmap_ptr)
20237 @@ -925,7 +929,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
20238 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20239 {
20240 int ret;
20241 - unsigned long __user *datap = (unsigned long __user *)data;
20242 + unsigned long __user *datap = (__force unsigned long __user *)data;
20243
20244 switch (request) {
20245 /* read the word at location addr in the USER area. */
20246 @@ -1012,14 +1016,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20247 if (addr < 0)
20248 return -EIO;
20249 ret = do_get_thread_area(child, addr,
20250 - (struct user_desc __user *) data);
20251 + (__force struct user_desc __user *) data);
20252 break;
20253
20254 case PTRACE_SET_THREAD_AREA:
20255 if (addr < 0)
20256 return -EIO;
20257 ret = do_set_thread_area(child, addr,
20258 - (struct user_desc __user *) data, 0);
20259 + (__force struct user_desc __user *) data, 0);
20260 break;
20261 #endif
20262
20263 @@ -1038,12 +1042,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20264 #ifdef CONFIG_X86_PTRACE_BTS
20265 case PTRACE_BTS_CONFIG:
20266 ret = ptrace_bts_config
20267 - (child, data, (struct ptrace_bts_config __user *)addr);
20268 + (child, data, (__force struct ptrace_bts_config __user *)addr);
20269 break;
20270
20271 case PTRACE_BTS_STATUS:
20272 ret = ptrace_bts_status
20273 - (child, data, (struct ptrace_bts_config __user *)addr);
20274 + (child, data, (__force struct ptrace_bts_config __user *)addr);
20275 break;
20276
20277 case PTRACE_BTS_SIZE:
20278 @@ -1052,7 +1056,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20279
20280 case PTRACE_BTS_GET:
20281 ret = ptrace_bts_read_record
20282 - (child, data, (struct bts_struct __user *) addr);
20283 + (child, data, (__force struct bts_struct __user *) addr);
20284 break;
20285
20286 case PTRACE_BTS_CLEAR:
20287 @@ -1061,7 +1065,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20288
20289 case PTRACE_BTS_DRAIN:
20290 ret = ptrace_bts_drain
20291 - (child, data, (struct bts_struct __user *) addr);
20292 + (child, data, (__force struct bts_struct __user *) addr);
20293 break;
20294 #endif /* CONFIG_X86_PTRACE_BTS */
20295
20296 @@ -1450,7 +1454,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
20297 info.si_code = si_code;
20298
20299 /* User-mode ip? */
20300 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
20301 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
20302
20303 /* Send us the fake SIGTRAP */
20304 force_sig_info(SIGTRAP, &info, tsk);
20305 @@ -1469,7 +1473,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
20306 * We must return the syscall number to actually look up in the table.
20307 * This can be -1L to skip running any syscall at all.
20308 */
20309 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
20310 +long syscall_trace_enter(struct pt_regs *regs)
20311 {
20312 long ret = 0;
20313
20314 @@ -1514,7 +1518,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
20315 return ret ?: regs->orig_ax;
20316 }
20317
20318 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
20319 +void syscall_trace_leave(struct pt_regs *regs)
20320 {
20321 if (unlikely(current->audit_context))
20322 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
20323 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
20324 index cf98100..e76e03d 100644
20325 --- a/arch/x86/kernel/reboot.c
20326 +++ b/arch/x86/kernel/reboot.c
20327 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
20328 EXPORT_SYMBOL(pm_power_off);
20329
20330 static const struct desc_ptr no_idt = {};
20331 -static int reboot_mode;
20332 +static unsigned short reboot_mode;
20333 enum reboot_type reboot_type = BOOT_KBD;
20334 int reboot_force;
20335
20336 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
20337 controller to pulse the CPU reset line, which is more thorough, but
20338 doesn't work with at least one type of 486 motherboard. It is easy
20339 to stop this code working; hence the copious comments. */
20340 -static const unsigned long long
20341 -real_mode_gdt_entries [3] =
20342 +static struct desc_struct
20343 +real_mode_gdt_entries [3] __read_only =
20344 {
20345 - 0x0000000000000000ULL, /* Null descriptor */
20346 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
20347 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
20348 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
20349 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
20350 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
20351 };
20352
20353 static const struct desc_ptr
20354 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
20355 * specified by the code and length parameters.
20356 * We assume that length will aways be less that 100!
20357 */
20358 -void machine_real_restart(const unsigned char *code, int length)
20359 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
20360 {
20361 local_irq_disable();
20362
20363 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
20364 /* Remap the kernel at virtual address zero, as well as offset zero
20365 from the kernel segment. This assumes the kernel segment starts at
20366 virtual address PAGE_OFFSET. */
20367 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20368 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
20369 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20370 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
20371
20372 /*
20373 * Use `swapper_pg_dir' as our page directory.
20374 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
20375 boot)". This seems like a fairly standard thing that gets set by
20376 REBOOT.COM programs, and the previous reset routine did this
20377 too. */
20378 - *((unsigned short *)0x472) = reboot_mode;
20379 + *(unsigned short *)(__va(0x472)) = reboot_mode;
20380
20381 /* For the switch to real mode, copy some code to low memory. It has
20382 to be in the first 64k because it is running in 16-bit mode, and it
20383 has to have the same physical and virtual address, because it turns
20384 off paging. Copy it near the end of the first page, out of the way
20385 of BIOS variables. */
20386 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
20387 - real_mode_switch, sizeof (real_mode_switch));
20388 - memcpy((void *)(0x1000 - 100), code, length);
20389 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
20390 + memcpy(__va(0x1000 - 100), code, length);
20391
20392 /* Set up the IDT for real mode. */
20393 load_idt(&real_mode_idt);
20394 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
20395 __asm__ __volatile__ ("ljmp $0x0008,%0"
20396 :
20397 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
20398 + do { } while (1);
20399 }
20400 #ifdef CONFIG_APM_MODULE
20401 EXPORT_SYMBOL(machine_real_restart);
20402 @@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
20403 {
20404 }
20405
20406 -static void native_machine_emergency_restart(void)
20407 +__noreturn static void native_machine_emergency_restart(void)
20408 {
20409 int i;
20410
20411 @@ -659,13 +659,13 @@ void native_machine_shutdown(void)
20412 #endif
20413 }
20414
20415 -static void __machine_emergency_restart(int emergency)
20416 +static __noreturn void __machine_emergency_restart(int emergency)
20417 {
20418 reboot_emergency = emergency;
20419 machine_ops.emergency_restart();
20420 }
20421
20422 -static void native_machine_restart(char *__unused)
20423 +static __noreturn void native_machine_restart(char *__unused)
20424 {
20425 printk("machine restart\n");
20426
20427 @@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
20428 __machine_emergency_restart(0);
20429 }
20430
20431 -static void native_machine_halt(void)
20432 +static __noreturn void native_machine_halt(void)
20433 {
20434 /* stop other cpus and apics */
20435 machine_shutdown();
20436 @@ -685,7 +685,7 @@ static void native_machine_halt(void)
20437 stop_this_cpu(NULL);
20438 }
20439
20440 -static void native_machine_power_off(void)
20441 +__noreturn static void native_machine_power_off(void)
20442 {
20443 if (pm_power_off) {
20444 if (!reboot_force)
20445 @@ -694,6 +694,7 @@ static void native_machine_power_off(void)
20446 }
20447 /* a fallback in case there is no PM info available */
20448 tboot_shutdown(TB_SHUTDOWN_HALT);
20449 + do { } while (1);
20450 }
20451
20452 struct machine_ops machine_ops = {
20453 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
20454 index 7a6f3b3..976a959 100644
20455 --- a/arch/x86/kernel/relocate_kernel_64.S
20456 +++ b/arch/x86/kernel/relocate_kernel_64.S
20457 @@ -11,6 +11,7 @@
20458 #include <asm/kexec.h>
20459 #include <asm/processor-flags.h>
20460 #include <asm/pgtable_types.h>
20461 +#include <asm/alternative-asm.h>
20462
20463 /*
20464 * Must be relocatable PIC code callable as a C function
20465 @@ -167,6 +168,7 @@ identity_mapped:
20466 xorq %r14, %r14
20467 xorq %r15, %r15
20468
20469 + pax_force_retaddr 0, 1
20470 ret
20471
20472 1:
20473 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
20474 index 5449a26..0b6c759 100644
20475 --- a/arch/x86/kernel/setup.c
20476 +++ b/arch/x86/kernel/setup.c
20477 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
20478
20479 if (!boot_params.hdr.root_flags)
20480 root_mountflags &= ~MS_RDONLY;
20481 - init_mm.start_code = (unsigned long) _text;
20482 - init_mm.end_code = (unsigned long) _etext;
20483 + init_mm.start_code = ktla_ktva((unsigned long) _text);
20484 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
20485 init_mm.end_data = (unsigned long) _edata;
20486 init_mm.brk = _brk_end;
20487
20488 - code_resource.start = virt_to_phys(_text);
20489 - code_resource.end = virt_to_phys(_etext)-1;
20490 - data_resource.start = virt_to_phys(_etext);
20491 + code_resource.start = virt_to_phys(ktla_ktva(_text));
20492 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
20493 + data_resource.start = virt_to_phys(_sdata);
20494 data_resource.end = virt_to_phys(_edata)-1;
20495 bss_resource.start = virt_to_phys(&__bss_start);
20496 bss_resource.end = virt_to_phys(&__bss_stop)-1;
20497 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
20498 index d559af9..244f55d 100644
20499 --- a/arch/x86/kernel/setup_percpu.c
20500 +++ b/arch/x86/kernel/setup_percpu.c
20501 @@ -25,19 +25,17 @@
20502 # define DBG(x...)
20503 #endif
20504
20505 -DEFINE_PER_CPU(int, cpu_number);
20506 +#ifdef CONFIG_SMP
20507 +DEFINE_PER_CPU(unsigned int, cpu_number);
20508 EXPORT_PER_CPU_SYMBOL(cpu_number);
20509 +#endif
20510
20511 -#ifdef CONFIG_X86_64
20512 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
20513 -#else
20514 -#define BOOT_PERCPU_OFFSET 0
20515 -#endif
20516
20517 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
20518 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
20519
20520 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
20521 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
20522 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
20523 };
20524 EXPORT_SYMBOL(__per_cpu_offset);
20525 @@ -100,6 +98,8 @@ static bool __init pcpu_need_numa(void)
20526 * Pointer to the allocated area on success, NULL on failure.
20527 */
20528 static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
20529 + unsigned long align) __size_overflow(2);
20530 +static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
20531 unsigned long align)
20532 {
20533 const unsigned long goal = __pa(MAX_DMA_ADDRESS);
20534 @@ -128,6 +128,8 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
20535 /*
20536 * Helpers for first chunk memory allocation
20537 */
20538 +static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) __size_overflow(2);
20539 +
20540 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
20541 {
20542 return pcpu_alloc_bootmem(cpu, size, align);
20543 @@ -159,10 +161,10 @@ static inline void setup_percpu_segment(int cpu)
20544 {
20545 #ifdef CONFIG_X86_32
20546 struct desc_struct gdt;
20547 + unsigned long base = per_cpu_offset(cpu);
20548
20549 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
20550 - 0x2 | DESCTYPE_S, 0x8);
20551 - gdt.s = 1;
20552 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
20553 + 0x83 | DESCTYPE_S, 0xC);
20554 write_gdt_entry(get_cpu_gdt_table(cpu),
20555 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
20556 #endif
20557 @@ -212,6 +214,11 @@ void __init setup_per_cpu_areas(void)
20558 /* alrighty, percpu areas up and running */
20559 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
20560 for_each_possible_cpu(cpu) {
20561 +#ifdef CONFIG_CC_STACKPROTECTOR
20562 +#ifdef CONFIG_X86_32
20563 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
20564 +#endif
20565 +#endif
20566 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
20567 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
20568 per_cpu(cpu_number, cpu) = cpu;
20569 @@ -239,6 +246,12 @@ void __init setup_per_cpu_areas(void)
20570 early_per_cpu_map(x86_cpu_to_node_map, cpu);
20571 #endif
20572 #endif
20573 +#ifdef CONFIG_CC_STACKPROTECTOR
20574 +#ifdef CONFIG_X86_32
20575 + if (!cpu)
20576 + per_cpu(stack_canary.canary, cpu) = canary;
20577 +#endif
20578 +#endif
20579 /*
20580 * Up to this point, the boot CPU has been using .data.init
20581 * area. Reload any changed state for the boot CPU.
20582 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
20583 index 6a44a76..a9287a1 100644
20584 --- a/arch/x86/kernel/signal.c
20585 +++ b/arch/x86/kernel/signal.c
20586 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
20587 * Align the stack pointer according to the i386 ABI,
20588 * i.e. so that on function entry ((sp + 4) & 15) == 0.
20589 */
20590 - sp = ((sp + 4) & -16ul) - 4;
20591 + sp = ((sp - 12) & -16ul) - 4;
20592 #else /* !CONFIG_X86_32 */
20593 sp = round_down(sp, 16) - 8;
20594 #endif
20595 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
20596 * Return an always-bogus address instead so we will die with SIGSEGV.
20597 */
20598 if (onsigstack && !likely(on_sig_stack(sp)))
20599 - return (void __user *)-1L;
20600 + return (__force void __user *)-1L;
20601
20602 /* save i387 state */
20603 if (used_math() && save_i387_xstate(*fpstate) < 0)
20604 - return (void __user *)-1L;
20605 + return (__force void __user *)-1L;
20606
20607 return (void __user *)sp;
20608 }
20609 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20610 }
20611
20612 if (current->mm->context.vdso)
20613 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20614 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20615 else
20616 - restorer = &frame->retcode;
20617 + restorer = (void __user *)&frame->retcode;
20618 if (ka->sa.sa_flags & SA_RESTORER)
20619 restorer = ka->sa.sa_restorer;
20620
20621 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20622 * reasons and because gdb uses it as a signature to notice
20623 * signal handler stack frames.
20624 */
20625 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
20626 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
20627
20628 if (err)
20629 return -EFAULT;
20630 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20631 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
20632
20633 /* Set up to return from userspace. */
20634 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20635 + if (current->mm->context.vdso)
20636 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20637 + else
20638 + restorer = (void __user *)&frame->retcode;
20639 if (ka->sa.sa_flags & SA_RESTORER)
20640 restorer = ka->sa.sa_restorer;
20641 put_user_ex(restorer, &frame->pretcode);
20642 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20643 * reasons and because gdb uses it as a signature to notice
20644 * signal handler stack frames.
20645 */
20646 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
20647 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
20648 } put_user_catch(err);
20649
20650 if (err)
20651 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
20652 int signr;
20653 sigset_t *oldset;
20654
20655 + pax_track_stack();
20656 +
20657 /*
20658 * We want the common case to go fast, which is why we may in certain
20659 * cases get here from kernel mode. Just return without doing anything
20660 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
20661 * X86_32: vm86 regs switched out by assembly code before reaching
20662 * here, so testing against kernel CS suffices.
20663 */
20664 - if (!user_mode(regs))
20665 + if (!user_mode_novm(regs))
20666 return;
20667
20668 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
20669 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
20670 index 7e8e905..64d5c32 100644
20671 --- a/arch/x86/kernel/smpboot.c
20672 +++ b/arch/x86/kernel/smpboot.c
20673 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
20674 */
20675 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
20676
20677 -void cpu_hotplug_driver_lock()
20678 +void cpu_hotplug_driver_lock(void)
20679 {
20680 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
20681 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
20682 }
20683
20684 -void cpu_hotplug_driver_unlock()
20685 +void cpu_hotplug_driver_unlock(void)
20686 {
20687 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
20688 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
20689 }
20690
20691 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
20692 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
20693 * target processor state.
20694 */
20695 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
20696 - (unsigned long)stack_start.sp);
20697 + stack_start);
20698
20699 /*
20700 * Run STARTUP IPI loop.
20701 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
20702 set_idle_for_cpu(cpu, c_idle.idle);
20703 do_rest:
20704 per_cpu(current_task, cpu) = c_idle.idle;
20705 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
20706 #ifdef CONFIG_X86_32
20707 /* Stack for startup_32 can be just as for start_secondary onwards */
20708 irq_ctx_init(cpu);
20709 @@ -750,13 +751,15 @@ do_rest:
20710 #else
20711 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
20712 initial_gs = per_cpu_offset(cpu);
20713 - per_cpu(kernel_stack, cpu) =
20714 - (unsigned long)task_stack_page(c_idle.idle) -
20715 - KERNEL_STACK_OFFSET + THREAD_SIZE;
20716 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
20717 #endif
20718 +
20719 + pax_open_kernel();
20720 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
20721 + pax_close_kernel();
20722 +
20723 initial_code = (unsigned long)start_secondary;
20724 - stack_start.sp = (void *) c_idle.idle->thread.sp;
20725 + stack_start = c_idle.idle->thread.sp;
20726
20727 /* start_ip had better be page-aligned! */
20728 start_ip = setup_trampoline();
20729 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
20730
20731 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
20732
20733 +#ifdef CONFIG_PAX_PER_CPU_PGD
20734 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
20735 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20736 + KERNEL_PGD_PTRS);
20737 +#endif
20738 +
20739 err = do_boot_cpu(apicid, cpu);
20740
20741 if (err) {
20742 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
20743 index 3149032..14f1053 100644
20744 --- a/arch/x86/kernel/step.c
20745 +++ b/arch/x86/kernel/step.c
20746 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20747 struct desc_struct *desc;
20748 unsigned long base;
20749
20750 - seg &= ~7UL;
20751 + seg >>= 3;
20752
20753 mutex_lock(&child->mm->context.lock);
20754 - if (unlikely((seg >> 3) >= child->mm->context.size))
20755 + if (unlikely(seg >= child->mm->context.size))
20756 addr = -1L; /* bogus selector, access would fault */
20757 else {
20758 desc = child->mm->context.ldt + seg;
20759 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20760 addr += base;
20761 }
20762 mutex_unlock(&child->mm->context.lock);
20763 - }
20764 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
20765 + addr = ktla_ktva(addr);
20766
20767 return addr;
20768 }
20769 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
20770 unsigned char opcode[15];
20771 unsigned long addr = convert_ip_to_linear(child, regs);
20772
20773 + if (addr == -EINVAL)
20774 + return 0;
20775 +
20776 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
20777 for (i = 0; i < copied; i++) {
20778 switch (opcode[i]) {
20779 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
20780
20781 #ifdef CONFIG_X86_64
20782 case 0x40 ... 0x4f:
20783 - if (regs->cs != __USER_CS)
20784 + if ((regs->cs & 0xffff) != __USER_CS)
20785 /* 32-bit mode: register increment */
20786 return 0;
20787 /* 64-bit mode: REX prefix */
20788 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
20789 index dee1ff7..a397f7f 100644
20790 --- a/arch/x86/kernel/sys_i386_32.c
20791 +++ b/arch/x86/kernel/sys_i386_32.c
20792 @@ -24,6 +24,21 @@
20793
20794 #include <asm/syscalls.h>
20795
20796 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
20797 +{
20798 + unsigned long pax_task_size = TASK_SIZE;
20799 +
20800 +#ifdef CONFIG_PAX_SEGMEXEC
20801 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
20802 + pax_task_size = SEGMEXEC_TASK_SIZE;
20803 +#endif
20804 +
20805 + if (len > pax_task_size || addr > pax_task_size - len)
20806 + return -EINVAL;
20807 +
20808 + return 0;
20809 +}
20810 +
20811 /*
20812 * Perform the select(nd, in, out, ex, tv) and mmap() system
20813 * calls. Linux/i386 didn't use to be able to handle more than
20814 @@ -58,6 +73,212 @@ out:
20815 return err;
20816 }
20817
20818 +unsigned long
20819 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
20820 + unsigned long len, unsigned long pgoff, unsigned long flags)
20821 +{
20822 + struct mm_struct *mm = current->mm;
20823 + struct vm_area_struct *vma;
20824 + unsigned long start_addr, pax_task_size = TASK_SIZE;
20825 +
20826 +#ifdef CONFIG_PAX_SEGMEXEC
20827 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20828 + pax_task_size = SEGMEXEC_TASK_SIZE;
20829 +#endif
20830 +
20831 + pax_task_size -= PAGE_SIZE;
20832 +
20833 + if (len > pax_task_size)
20834 + return -ENOMEM;
20835 +
20836 + if (flags & MAP_FIXED)
20837 + return addr;
20838 +
20839 +#ifdef CONFIG_PAX_RANDMMAP
20840 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20841 +#endif
20842 +
20843 + if (addr) {
20844 + addr = PAGE_ALIGN(addr);
20845 + if (pax_task_size - len >= addr) {
20846 + vma = find_vma(mm, addr);
20847 + if (check_heap_stack_gap(vma, addr, len))
20848 + return addr;
20849 + }
20850 + }
20851 + if (len > mm->cached_hole_size) {
20852 + start_addr = addr = mm->free_area_cache;
20853 + } else {
20854 + start_addr = addr = mm->mmap_base;
20855 + mm->cached_hole_size = 0;
20856 + }
20857 +
20858 +#ifdef CONFIG_PAX_PAGEEXEC
20859 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
20860 + start_addr = 0x00110000UL;
20861 +
20862 +#ifdef CONFIG_PAX_RANDMMAP
20863 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20864 + start_addr += mm->delta_mmap & 0x03FFF000UL;
20865 +#endif
20866 +
20867 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
20868 + start_addr = addr = mm->mmap_base;
20869 + else
20870 + addr = start_addr;
20871 + }
20872 +#endif
20873 +
20874 +full_search:
20875 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20876 + /* At this point: (!vma || addr < vma->vm_end). */
20877 + if (pax_task_size - len < addr) {
20878 + /*
20879 + * Start a new search - just in case we missed
20880 + * some holes.
20881 + */
20882 + if (start_addr != mm->mmap_base) {
20883 + start_addr = addr = mm->mmap_base;
20884 + mm->cached_hole_size = 0;
20885 + goto full_search;
20886 + }
20887 + return -ENOMEM;
20888 + }
20889 + if (check_heap_stack_gap(vma, addr, len))
20890 + break;
20891 + if (addr + mm->cached_hole_size < vma->vm_start)
20892 + mm->cached_hole_size = vma->vm_start - addr;
20893 + addr = vma->vm_end;
20894 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
20895 + start_addr = addr = mm->mmap_base;
20896 + mm->cached_hole_size = 0;
20897 + goto full_search;
20898 + }
20899 + }
20900 +
20901 + /*
20902 + * Remember the place where we stopped the search:
20903 + */
20904 + mm->free_area_cache = addr + len;
20905 + return addr;
20906 +}
20907 +
20908 +unsigned long
20909 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20910 + const unsigned long len, const unsigned long pgoff,
20911 + const unsigned long flags)
20912 +{
20913 + struct vm_area_struct *vma;
20914 + struct mm_struct *mm = current->mm;
20915 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
20916 +
20917 +#ifdef CONFIG_PAX_SEGMEXEC
20918 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20919 + pax_task_size = SEGMEXEC_TASK_SIZE;
20920 +#endif
20921 +
20922 + pax_task_size -= PAGE_SIZE;
20923 +
20924 + /* requested length too big for entire address space */
20925 + if (len > pax_task_size)
20926 + return -ENOMEM;
20927 +
20928 + if (flags & MAP_FIXED)
20929 + return addr;
20930 +
20931 +#ifdef CONFIG_PAX_PAGEEXEC
20932 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
20933 + goto bottomup;
20934 +#endif
20935 +
20936 +#ifdef CONFIG_PAX_RANDMMAP
20937 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20938 +#endif
20939 +
20940 + /* requesting a specific address */
20941 + if (addr) {
20942 + addr = PAGE_ALIGN(addr);
20943 + if (pax_task_size - len >= addr) {
20944 + vma = find_vma(mm, addr);
20945 + if (check_heap_stack_gap(vma, addr, len))
20946 + return addr;
20947 + }
20948 + }
20949 +
20950 + /* check if free_area_cache is useful for us */
20951 + if (len <= mm->cached_hole_size) {
20952 + mm->cached_hole_size = 0;
20953 + mm->free_area_cache = mm->mmap_base;
20954 + }
20955 +
20956 + /* either no address requested or can't fit in requested address hole */
20957 + addr = mm->free_area_cache;
20958 +
20959 + /* make sure it can fit in the remaining address space */
20960 + if (addr > len) {
20961 + vma = find_vma(mm, addr-len);
20962 + if (check_heap_stack_gap(vma, addr - len, len))
20963 + /* remember the address as a hint for next time */
20964 + return (mm->free_area_cache = addr-len);
20965 + }
20966 +
20967 + if (mm->mmap_base < len)
20968 + goto bottomup;
20969 +
20970 + addr = mm->mmap_base-len;
20971 +
20972 + do {
20973 + /*
20974 + * Lookup failure means no vma is above this address,
20975 + * else if new region fits below vma->vm_start,
20976 + * return with success:
20977 + */
20978 + vma = find_vma(mm, addr);
20979 + if (check_heap_stack_gap(vma, addr, len))
20980 + /* remember the address as a hint for next time */
20981 + return (mm->free_area_cache = addr);
20982 +
20983 + /* remember the largest hole we saw so far */
20984 + if (addr + mm->cached_hole_size < vma->vm_start)
20985 + mm->cached_hole_size = vma->vm_start - addr;
20986 +
20987 + /* try just below the current vma->vm_start */
20988 + addr = skip_heap_stack_gap(vma, len);
20989 + } while (!IS_ERR_VALUE(addr));
20990 +
20991 +bottomup:
20992 + /*
20993 + * A failed mmap() very likely causes application failure,
20994 + * so fall back to the bottom-up function here. This scenario
20995 + * can happen with large stack limits and large mmap()
20996 + * allocations.
20997 + */
20998 +
20999 +#ifdef CONFIG_PAX_SEGMEXEC
21000 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21001 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21002 + else
21003 +#endif
21004 +
21005 + mm->mmap_base = TASK_UNMAPPED_BASE;
21006 +
21007 +#ifdef CONFIG_PAX_RANDMMAP
21008 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21009 + mm->mmap_base += mm->delta_mmap;
21010 +#endif
21011 +
21012 + mm->free_area_cache = mm->mmap_base;
21013 + mm->cached_hole_size = ~0UL;
21014 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
21015 + /*
21016 + * Restore the topdown base:
21017 + */
21018 + mm->mmap_base = base;
21019 + mm->free_area_cache = base;
21020 + mm->cached_hole_size = ~0UL;
21021 +
21022 + return addr;
21023 +}
21024
21025 struct sel_arg_struct {
21026 unsigned long n;
21027 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
21028 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
21029 case SEMTIMEDOP:
21030 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
21031 - (const struct timespec __user *)fifth);
21032 + (__force const struct timespec __user *)fifth);
21033
21034 case SEMGET:
21035 return sys_semget(first, second, third);
21036 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
21037 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
21038 if (ret)
21039 return ret;
21040 - return put_user(raddr, (ulong __user *) third);
21041 + return put_user(raddr, (__force ulong __user *) third);
21042 }
21043 case 1: /* iBCS2 emulator entry point */
21044 if (!segment_eq(get_fs(), get_ds()))
21045 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
21046
21047 return error;
21048 }
21049 -
21050 -
21051 -/*
21052 - * Do a system call from kernel instead of calling sys_execve so we
21053 - * end up with proper pt_regs.
21054 - */
21055 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
21056 -{
21057 - long __res;
21058 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
21059 - : "=a" (__res)
21060 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
21061 - return __res;
21062 -}
21063 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
21064 index 8aa2057..b604bc1 100644
21065 --- a/arch/x86/kernel/sys_x86_64.c
21066 +++ b/arch/x86/kernel/sys_x86_64.c
21067 @@ -32,8 +32,8 @@ out:
21068 return error;
21069 }
21070
21071 -static void find_start_end(unsigned long flags, unsigned long *begin,
21072 - unsigned long *end)
21073 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
21074 + unsigned long *begin, unsigned long *end)
21075 {
21076 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
21077 unsigned long new_begin;
21078 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
21079 *begin = new_begin;
21080 }
21081 } else {
21082 - *begin = TASK_UNMAPPED_BASE;
21083 + *begin = mm->mmap_base;
21084 *end = TASK_SIZE;
21085 }
21086 }
21087 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
21088 if (flags & MAP_FIXED)
21089 return addr;
21090
21091 - find_start_end(flags, &begin, &end);
21092 + find_start_end(mm, flags, &begin, &end);
21093
21094 if (len > end)
21095 return -ENOMEM;
21096
21097 +#ifdef CONFIG_PAX_RANDMMAP
21098 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
21099 +#endif
21100 +
21101 if (addr) {
21102 addr = PAGE_ALIGN(addr);
21103 vma = find_vma(mm, addr);
21104 - if (end - len >= addr &&
21105 - (!vma || addr + len <= vma->vm_start))
21106 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
21107 return addr;
21108 }
21109 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
21110 @@ -106,7 +109,7 @@ full_search:
21111 }
21112 return -ENOMEM;
21113 }
21114 - if (!vma || addr + len <= vma->vm_start) {
21115 + if (check_heap_stack_gap(vma, addr, len)) {
21116 /*
21117 * Remember the place where we stopped the search:
21118 */
21119 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21120 {
21121 struct vm_area_struct *vma;
21122 struct mm_struct *mm = current->mm;
21123 - unsigned long addr = addr0;
21124 + unsigned long base = mm->mmap_base, addr = addr0;
21125
21126 /* requested length too big for entire address space */
21127 if (len > TASK_SIZE)
21128 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21129 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
21130 goto bottomup;
21131
21132 +#ifdef CONFIG_PAX_RANDMMAP
21133 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
21134 +#endif
21135 +
21136 /* requesting a specific address */
21137 if (addr) {
21138 addr = PAGE_ALIGN(addr);
21139 - vma = find_vma(mm, addr);
21140 - if (TASK_SIZE - len >= addr &&
21141 - (!vma || addr + len <= vma->vm_start))
21142 - return addr;
21143 + if (TASK_SIZE - len >= addr) {
21144 + vma = find_vma(mm, addr);
21145 + if (check_heap_stack_gap(vma, addr, len))
21146 + return addr;
21147 + }
21148 }
21149
21150 /* check if free_area_cache is useful for us */
21151 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21152 /* make sure it can fit in the remaining address space */
21153 if (addr > len) {
21154 vma = find_vma(mm, addr-len);
21155 - if (!vma || addr <= vma->vm_start)
21156 + if (check_heap_stack_gap(vma, addr - len, len))
21157 /* remember the address as a hint for next time */
21158 return mm->free_area_cache = addr-len;
21159 }
21160 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21161 * return with success:
21162 */
21163 vma = find_vma(mm, addr);
21164 - if (!vma || addr+len <= vma->vm_start)
21165 + if (check_heap_stack_gap(vma, addr, len))
21166 /* remember the address as a hint for next time */
21167 return mm->free_area_cache = addr;
21168
21169 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21170 mm->cached_hole_size = vma->vm_start - addr;
21171
21172 /* try just below the current vma->vm_start */
21173 - addr = vma->vm_start-len;
21174 - } while (len < vma->vm_start);
21175 + addr = skip_heap_stack_gap(vma, len);
21176 + } while (!IS_ERR_VALUE(addr));
21177
21178 bottomup:
21179 /*
21180 @@ -198,13 +206,21 @@ bottomup:
21181 * can happen with large stack limits and large mmap()
21182 * allocations.
21183 */
21184 + mm->mmap_base = TASK_UNMAPPED_BASE;
21185 +
21186 +#ifdef CONFIG_PAX_RANDMMAP
21187 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21188 + mm->mmap_base += mm->delta_mmap;
21189 +#endif
21190 +
21191 + mm->free_area_cache = mm->mmap_base;
21192 mm->cached_hole_size = ~0UL;
21193 - mm->free_area_cache = TASK_UNMAPPED_BASE;
21194 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
21195 /*
21196 * Restore the topdown base:
21197 */
21198 - mm->free_area_cache = mm->mmap_base;
21199 + mm->mmap_base = base;
21200 + mm->free_area_cache = base;
21201 mm->cached_hole_size = ~0UL;
21202
21203 return addr;
21204 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
21205 index 76d70a4..4c94a44 100644
21206 --- a/arch/x86/kernel/syscall_table_32.S
21207 +++ b/arch/x86/kernel/syscall_table_32.S
21208 @@ -1,3 +1,4 @@
21209 +.section .rodata,"a",@progbits
21210 ENTRY(sys_call_table)
21211 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
21212 .long sys_exit
21213 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
21214 index 46b8277..3349d55 100644
21215 --- a/arch/x86/kernel/tboot.c
21216 +++ b/arch/x86/kernel/tboot.c
21217 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
21218
21219 void tboot_shutdown(u32 shutdown_type)
21220 {
21221 - void (*shutdown)(void);
21222 + void (* __noreturn shutdown)(void);
21223
21224 if (!tboot_enabled())
21225 return;
21226 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
21227
21228 switch_to_tboot_pt();
21229
21230 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
21231 + shutdown = (void *)tboot->shutdown_entry;
21232 shutdown();
21233
21234 /* should not reach here */
21235 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
21236 tboot_shutdown(acpi_shutdown_map[sleep_state]);
21237 }
21238
21239 -static atomic_t ap_wfs_count;
21240 +static atomic_unchecked_t ap_wfs_count;
21241
21242 static int tboot_wait_for_aps(int num_aps)
21243 {
21244 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
21245 {
21246 switch (action) {
21247 case CPU_DYING:
21248 - atomic_inc(&ap_wfs_count);
21249 + atomic_inc_unchecked(&ap_wfs_count);
21250 if (num_online_cpus() == 1)
21251 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
21252 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
21253 return NOTIFY_BAD;
21254 break;
21255 }
21256 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
21257
21258 tboot_create_trampoline();
21259
21260 - atomic_set(&ap_wfs_count, 0);
21261 + atomic_set_unchecked(&ap_wfs_count, 0);
21262 register_hotcpu_notifier(&tboot_cpu_notifier);
21263 return 0;
21264 }
21265 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
21266 index be25734..87fe232 100644
21267 --- a/arch/x86/kernel/time.c
21268 +++ b/arch/x86/kernel/time.c
21269 @@ -26,17 +26,13 @@
21270 int timer_ack;
21271 #endif
21272
21273 -#ifdef CONFIG_X86_64
21274 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
21275 -#endif
21276 -
21277 unsigned long profile_pc(struct pt_regs *regs)
21278 {
21279 unsigned long pc = instruction_pointer(regs);
21280
21281 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
21282 + if (!user_mode(regs) && in_lock_functions(pc)) {
21283 #ifdef CONFIG_FRAME_POINTER
21284 - return *(unsigned long *)(regs->bp + sizeof(long));
21285 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
21286 #else
21287 unsigned long *sp =
21288 (unsigned long *)kernel_stack_pointer(regs);
21289 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
21290 * or above a saved flags. Eflags has bits 22-31 zero,
21291 * kernel addresses don't.
21292 */
21293 +
21294 +#ifdef CONFIG_PAX_KERNEXEC
21295 + return ktla_ktva(sp[0]);
21296 +#else
21297 if (sp[0] >> 22)
21298 return sp[0];
21299 if (sp[1] >> 22)
21300 return sp[1];
21301 #endif
21302 +
21303 +#endif
21304 }
21305 return pc;
21306 }
21307 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
21308 index 6bb7b85..dd853e1 100644
21309 --- a/arch/x86/kernel/tls.c
21310 +++ b/arch/x86/kernel/tls.c
21311 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
21312 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
21313 return -EINVAL;
21314
21315 +#ifdef CONFIG_PAX_SEGMEXEC
21316 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
21317 + return -EINVAL;
21318 +#endif
21319 +
21320 set_tls_desc(p, idx, &info, 1);
21321
21322 return 0;
21323 diff --git a/arch/x86/kernel/tls.h b/arch/x86/kernel/tls.h
21324 index 2f083a2..7d3fecc 100644
21325 --- a/arch/x86/kernel/tls.h
21326 +++ b/arch/x86/kernel/tls.h
21327 @@ -16,6 +16,6 @@
21328
21329 extern user_regset_active_fn regset_tls_active;
21330 extern user_regset_get_fn regset_tls_get;
21331 -extern user_regset_set_fn regset_tls_set;
21332 +extern user_regset_set_fn regset_tls_set __size_overflow(4);
21333
21334 #endif /* _ARCH_X86_KERNEL_TLS_H */
21335 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
21336 index 8508237..229b664 100644
21337 --- a/arch/x86/kernel/trampoline_32.S
21338 +++ b/arch/x86/kernel/trampoline_32.S
21339 @@ -32,6 +32,12 @@
21340 #include <asm/segment.h>
21341 #include <asm/page_types.h>
21342
21343 +#ifdef CONFIG_PAX_KERNEXEC
21344 +#define ta(X) (X)
21345 +#else
21346 +#define ta(X) ((X) - __PAGE_OFFSET)
21347 +#endif
21348 +
21349 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
21350 __CPUINITRODATA
21351 .code16
21352 @@ -60,7 +66,7 @@ r_base = .
21353 inc %ax # protected mode (PE) bit
21354 lmsw %ax # into protected mode
21355 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
21356 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
21357 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
21358
21359 # These need to be in the same 64K segment as the above;
21360 # hence we don't use the boot_gdt_descr defined in head.S
21361 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
21362 index 3af2dff..ba8aa49 100644
21363 --- a/arch/x86/kernel/trampoline_64.S
21364 +++ b/arch/x86/kernel/trampoline_64.S
21365 @@ -91,7 +91,7 @@ startup_32:
21366 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
21367 movl %eax, %ds
21368
21369 - movl $X86_CR4_PAE, %eax
21370 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
21371 movl %eax, %cr4 # Enable PAE mode
21372
21373 # Setup trampoline 4 level pagetables
21374 @@ -127,7 +127,7 @@ startup_64:
21375 no_longmode:
21376 hlt
21377 jmp no_longmode
21378 -#include "verify_cpu_64.S"
21379 +#include "verify_cpu.S"
21380
21381 # Careful these need to be in the same 64K segment as the above;
21382 tidt:
21383 @@ -138,7 +138,7 @@ tidt:
21384 # so the kernel can live anywhere
21385 .balign 4
21386 tgdt:
21387 - .short tgdt_end - tgdt # gdt limit
21388 + .short tgdt_end - tgdt - 1 # gdt limit
21389 .long tgdt - r_base
21390 .short 0
21391 .quad 0x00cf9b000000ffff # __KERNEL32_CS
21392 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
21393 index 7e37dce..ec3f8e5 100644
21394 --- a/arch/x86/kernel/traps.c
21395 +++ b/arch/x86/kernel/traps.c
21396 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
21397
21398 /* Do we ignore FPU interrupts ? */
21399 char ignore_fpu_irq;
21400 -
21401 -/*
21402 - * The IDT has to be page-aligned to simplify the Pentium
21403 - * F0 0F bug workaround.
21404 - */
21405 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
21406 #endif
21407
21408 DECLARE_BITMAP(used_vectors, NR_VECTORS);
21409 @@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
21410 static inline void
21411 die_if_kernel(const char *str, struct pt_regs *regs, long err)
21412 {
21413 - if (!user_mode_vm(regs))
21414 + if (!user_mode(regs))
21415 die(str, regs, err);
21416 }
21417 #endif
21418
21419 static void __kprobes
21420 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
21421 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
21422 long error_code, siginfo_t *info)
21423 {
21424 struct task_struct *tsk = current;
21425
21426 #ifdef CONFIG_X86_32
21427 - if (regs->flags & X86_VM_MASK) {
21428 + if (v8086_mode(regs)) {
21429 /*
21430 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
21431 * On nmi (interrupt 2), do_trap should not be called.
21432 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
21433 }
21434 #endif
21435
21436 - if (!user_mode(regs))
21437 + if (!user_mode_novm(regs))
21438 goto kernel_trap;
21439
21440 #ifdef CONFIG_X86_32
21441 @@ -158,7 +152,7 @@ trap_signal:
21442 printk_ratelimit()) {
21443 printk(KERN_INFO
21444 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
21445 - tsk->comm, tsk->pid, str,
21446 + tsk->comm, task_pid_nr(tsk), str,
21447 regs->ip, regs->sp, error_code);
21448 print_vma_addr(" in ", regs->ip);
21449 printk("\n");
21450 @@ -175,8 +169,20 @@ kernel_trap:
21451 if (!fixup_exception(regs)) {
21452 tsk->thread.error_code = error_code;
21453 tsk->thread.trap_no = trapnr;
21454 +
21455 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21456 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
21457 + str = "PAX: suspicious stack segment fault";
21458 +#endif
21459 +
21460 die(str, regs, error_code);
21461 }
21462 +
21463 +#ifdef CONFIG_PAX_REFCOUNT
21464 + if (trapnr == 4)
21465 + pax_report_refcount_overflow(regs);
21466 +#endif
21467 +
21468 return;
21469
21470 #ifdef CONFIG_X86_32
21471 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
21472 conditional_sti(regs);
21473
21474 #ifdef CONFIG_X86_32
21475 - if (regs->flags & X86_VM_MASK)
21476 + if (v8086_mode(regs))
21477 goto gp_in_vm86;
21478 #endif
21479
21480 tsk = current;
21481 - if (!user_mode(regs))
21482 + if (!user_mode_novm(regs))
21483 goto gp_in_kernel;
21484
21485 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21486 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
21487 + struct mm_struct *mm = tsk->mm;
21488 + unsigned long limit;
21489 +
21490 + down_write(&mm->mmap_sem);
21491 + limit = mm->context.user_cs_limit;
21492 + if (limit < TASK_SIZE) {
21493 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
21494 + up_write(&mm->mmap_sem);
21495 + return;
21496 + }
21497 + up_write(&mm->mmap_sem);
21498 + }
21499 +#endif
21500 +
21501 tsk->thread.error_code = error_code;
21502 tsk->thread.trap_no = 13;
21503
21504 @@ -305,6 +327,13 @@ gp_in_kernel:
21505 if (notify_die(DIE_GPF, "general protection fault", regs,
21506 error_code, 13, SIGSEGV) == NOTIFY_STOP)
21507 return;
21508 +
21509 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21510 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
21511 + die("PAX: suspicious general protection fault", regs, error_code);
21512 + else
21513 +#endif
21514 +
21515 die("general protection fault", regs, error_code);
21516 }
21517
21518 @@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
21519 dotraplinkage notrace __kprobes void
21520 do_nmi(struct pt_regs *regs, long error_code)
21521 {
21522 +
21523 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21524 + if (!user_mode(regs)) {
21525 + unsigned long cs = regs->cs & 0xFFFF;
21526 + unsigned long ip = ktva_ktla(regs->ip);
21527 +
21528 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
21529 + regs->ip = ip;
21530 + }
21531 +#endif
21532 +
21533 nmi_enter();
21534
21535 inc_irq_stat(__nmi_count);
21536 @@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
21537 }
21538
21539 #ifdef CONFIG_X86_32
21540 - if (regs->flags & X86_VM_MASK)
21541 + if (v8086_mode(regs))
21542 goto debug_vm86;
21543 #endif
21544
21545 @@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
21546 * kernel space (but re-enable TF when returning to user mode).
21547 */
21548 if (condition & DR_STEP) {
21549 - if (!user_mode(regs))
21550 + if (!user_mode_novm(regs))
21551 goto clear_TF_reenable;
21552 }
21553
21554 @@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
21555 * Handle strange cache flush from user space exception
21556 * in all other cases. This is undocumented behaviour.
21557 */
21558 - if (regs->flags & X86_VM_MASK) {
21559 + if (v8086_mode(regs)) {
21560 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
21561 return;
21562 }
21563 @@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
21564 void __math_state_restore(void)
21565 {
21566 struct thread_info *thread = current_thread_info();
21567 - struct task_struct *tsk = thread->task;
21568 + struct task_struct *tsk = current;
21569
21570 /*
21571 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
21572 @@ -825,8 +865,7 @@ void __math_state_restore(void)
21573 */
21574 asmlinkage void math_state_restore(void)
21575 {
21576 - struct thread_info *thread = current_thread_info();
21577 - struct task_struct *tsk = thread->task;
21578 + struct task_struct *tsk = current;
21579
21580 if (!tsk_used_math(tsk)) {
21581 local_irq_enable();
21582 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
21583 new file mode 100644
21584 index 0000000..50c5edd
21585 --- /dev/null
21586 +++ b/arch/x86/kernel/verify_cpu.S
21587 @@ -0,0 +1,140 @@
21588 +/*
21589 + *
21590 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
21591 + * code has been borrowed from boot/setup.S and was introduced by
21592 + * Andi Kleen.
21593 + *
21594 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
21595 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
21596 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
21597 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
21598 + *
21599 + * This source code is licensed under the GNU General Public License,
21600 + * Version 2. See the file COPYING for more details.
21601 + *
21602 + * This is a common code for verification whether CPU supports
21603 + * long mode and SSE or not. It is not called directly instead this
21604 + * file is included at various places and compiled in that context.
21605 + * This file is expected to run in 32bit code. Currently:
21606 + *
21607 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
21608 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
21609 + * arch/x86/kernel/head_32.S: processor startup
21610 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
21611 + *
21612 + * verify_cpu, returns the status of longmode and SSE in register %eax.
21613 + * 0: Success 1: Failure
21614 + *
21615 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
21616 + *
21617 + * The caller needs to check for the error code and take the action
21618 + * appropriately. Either display a message or halt.
21619 + */
21620 +
21621 +#include <asm/cpufeature.h>
21622 +#include <asm/msr-index.h>
21623 +
21624 +verify_cpu:
21625 + pushfl # Save caller passed flags
21626 + pushl $0 # Kill any dangerous flags
21627 + popfl
21628 +
21629 + pushfl # standard way to check for cpuid
21630 + popl %eax
21631 + movl %eax,%ebx
21632 + xorl $0x200000,%eax
21633 + pushl %eax
21634 + popfl
21635 + pushfl
21636 + popl %eax
21637 + cmpl %eax,%ebx
21638 + jz verify_cpu_no_longmode # cpu has no cpuid
21639 +
21640 + movl $0x0,%eax # See if cpuid 1 is implemented
21641 + cpuid
21642 + cmpl $0x1,%eax
21643 + jb verify_cpu_no_longmode # no cpuid 1
21644 +
21645 + xor %di,%di
21646 + cmpl $0x68747541,%ebx # AuthenticAMD
21647 + jnz verify_cpu_noamd
21648 + cmpl $0x69746e65,%edx
21649 + jnz verify_cpu_noamd
21650 + cmpl $0x444d4163,%ecx
21651 + jnz verify_cpu_noamd
21652 + mov $1,%di # cpu is from AMD
21653 + jmp verify_cpu_check
21654 +
21655 +verify_cpu_noamd:
21656 + cmpl $0x756e6547,%ebx # GenuineIntel?
21657 + jnz verify_cpu_check
21658 + cmpl $0x49656e69,%edx
21659 + jnz verify_cpu_check
21660 + cmpl $0x6c65746e,%ecx
21661 + jnz verify_cpu_check
21662 +
21663 + # only call IA32_MISC_ENABLE when:
21664 + # family > 6 || (family == 6 && model >= 0xd)
21665 + movl $0x1, %eax # check CPU family and model
21666 + cpuid
21667 + movl %eax, %ecx
21668 +
21669 + andl $0x0ff00f00, %eax # mask family and extended family
21670 + shrl $8, %eax
21671 + cmpl $6, %eax
21672 + ja verify_cpu_clear_xd # family > 6, ok
21673 + jb verify_cpu_check # family < 6, skip
21674 +
21675 + andl $0x000f00f0, %ecx # mask model and extended model
21676 + shrl $4, %ecx
21677 + cmpl $0xd, %ecx
21678 + jb verify_cpu_check # family == 6, model < 0xd, skip
21679 +
21680 +verify_cpu_clear_xd:
21681 + movl $MSR_IA32_MISC_ENABLE, %ecx
21682 + rdmsr
21683 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
21684 + jnc verify_cpu_check # only write MSR if bit was changed
21685 + wrmsr
21686 +
21687 +verify_cpu_check:
21688 + movl $0x1,%eax # Does the cpu have what it takes
21689 + cpuid
21690 + andl $REQUIRED_MASK0,%edx
21691 + xorl $REQUIRED_MASK0,%edx
21692 + jnz verify_cpu_no_longmode
21693 +
21694 + movl $0x80000000,%eax # See if extended cpuid is implemented
21695 + cpuid
21696 + cmpl $0x80000001,%eax
21697 + jb verify_cpu_no_longmode # no extended cpuid
21698 +
21699 + movl $0x80000001,%eax # Does the cpu have what it takes
21700 + cpuid
21701 + andl $REQUIRED_MASK1,%edx
21702 + xorl $REQUIRED_MASK1,%edx
21703 + jnz verify_cpu_no_longmode
21704 +
21705 +verify_cpu_sse_test:
21706 + movl $1,%eax
21707 + cpuid
21708 + andl $SSE_MASK,%edx
21709 + cmpl $SSE_MASK,%edx
21710 + je verify_cpu_sse_ok
21711 + test %di,%di
21712 + jz verify_cpu_no_longmode # only try to force SSE on AMD
21713 + movl $MSR_K7_HWCR,%ecx
21714 + rdmsr
21715 + btr $15,%eax # enable SSE
21716 + wrmsr
21717 + xor %di,%di # don't loop
21718 + jmp verify_cpu_sse_test # try again
21719 +
21720 +verify_cpu_no_longmode:
21721 + popfl # Restore caller passed flags
21722 + movl $1,%eax
21723 + ret
21724 +verify_cpu_sse_ok:
21725 + popfl # Restore caller passed flags
21726 + xorl %eax, %eax
21727 + ret
21728 diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
21729 deleted file mode 100644
21730 index 45b6f8a..0000000
21731 --- a/arch/x86/kernel/verify_cpu_64.S
21732 +++ /dev/null
21733 @@ -1,105 +0,0 @@
21734 -/*
21735 - *
21736 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
21737 - * code has been borrowed from boot/setup.S and was introduced by
21738 - * Andi Kleen.
21739 - *
21740 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
21741 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
21742 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
21743 - *
21744 - * This source code is licensed under the GNU General Public License,
21745 - * Version 2. See the file COPYING for more details.
21746 - *
21747 - * This is a common code for verification whether CPU supports
21748 - * long mode and SSE or not. It is not called directly instead this
21749 - * file is included at various places and compiled in that context.
21750 - * Following are the current usage.
21751 - *
21752 - * This file is included by both 16bit and 32bit code.
21753 - *
21754 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
21755 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
21756 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
21757 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
21758 - *
21759 - * verify_cpu, returns the status of cpu check in register %eax.
21760 - * 0: Success 1: Failure
21761 - *
21762 - * The caller needs to check for the error code and take the action
21763 - * appropriately. Either display a message or halt.
21764 - */
21765 -
21766 -#include <asm/cpufeature.h>
21767 -
21768 -verify_cpu:
21769 - pushfl # Save caller passed flags
21770 - pushl $0 # Kill any dangerous flags
21771 - popfl
21772 -
21773 - pushfl # standard way to check for cpuid
21774 - popl %eax
21775 - movl %eax,%ebx
21776 - xorl $0x200000,%eax
21777 - pushl %eax
21778 - popfl
21779 - pushfl
21780 - popl %eax
21781 - cmpl %eax,%ebx
21782 - jz verify_cpu_no_longmode # cpu has no cpuid
21783 -
21784 - movl $0x0,%eax # See if cpuid 1 is implemented
21785 - cpuid
21786 - cmpl $0x1,%eax
21787 - jb verify_cpu_no_longmode # no cpuid 1
21788 -
21789 - xor %di,%di
21790 - cmpl $0x68747541,%ebx # AuthenticAMD
21791 - jnz verify_cpu_noamd
21792 - cmpl $0x69746e65,%edx
21793 - jnz verify_cpu_noamd
21794 - cmpl $0x444d4163,%ecx
21795 - jnz verify_cpu_noamd
21796 - mov $1,%di # cpu is from AMD
21797 -
21798 -verify_cpu_noamd:
21799 - movl $0x1,%eax # Does the cpu have what it takes
21800 - cpuid
21801 - andl $REQUIRED_MASK0,%edx
21802 - xorl $REQUIRED_MASK0,%edx
21803 - jnz verify_cpu_no_longmode
21804 -
21805 - movl $0x80000000,%eax # See if extended cpuid is implemented
21806 - cpuid
21807 - cmpl $0x80000001,%eax
21808 - jb verify_cpu_no_longmode # no extended cpuid
21809 -
21810 - movl $0x80000001,%eax # Does the cpu have what it takes
21811 - cpuid
21812 - andl $REQUIRED_MASK1,%edx
21813 - xorl $REQUIRED_MASK1,%edx
21814 - jnz verify_cpu_no_longmode
21815 -
21816 -verify_cpu_sse_test:
21817 - movl $1,%eax
21818 - cpuid
21819 - andl $SSE_MASK,%edx
21820 - cmpl $SSE_MASK,%edx
21821 - je verify_cpu_sse_ok
21822 - test %di,%di
21823 - jz verify_cpu_no_longmode # only try to force SSE on AMD
21824 - movl $0xc0010015,%ecx # HWCR
21825 - rdmsr
21826 - btr $15,%eax # enable SSE
21827 - wrmsr
21828 - xor %di,%di # don't loop
21829 - jmp verify_cpu_sse_test # try again
21830 -
21831 -verify_cpu_no_longmode:
21832 - popfl # Restore caller passed flags
21833 - movl $1,%eax
21834 - ret
21835 -verify_cpu_sse_ok:
21836 - popfl # Restore caller passed flags
21837 - xorl %eax, %eax
21838 - ret
21839 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
21840 index 9c4e625..e9bb4ed 100644
21841 --- a/arch/x86/kernel/vm86_32.c
21842 +++ b/arch/x86/kernel/vm86_32.c
21843 @@ -41,6 +41,7 @@
21844 #include <linux/ptrace.h>
21845 #include <linux/audit.h>
21846 #include <linux/stddef.h>
21847 +#include <linux/grsecurity.h>
21848
21849 #include <asm/uaccess.h>
21850 #include <asm/io.h>
21851 @@ -109,6 +110,9 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
21852 /* convert vm86_regs to kernel_vm86_regs */
21853 static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
21854 const struct vm86_regs __user *user,
21855 + unsigned extra) __size_overflow(3);
21856 +static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
21857 + const struct vm86_regs __user *user,
21858 unsigned extra)
21859 {
21860 int ret = 0;
21861 @@ -148,7 +152,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
21862 do_exit(SIGSEGV);
21863 }
21864
21865 - tss = &per_cpu(init_tss, get_cpu());
21866 + tss = init_tss + get_cpu();
21867 current->thread.sp0 = current->thread.saved_sp0;
21868 current->thread.sysenter_cs = __KERNEL_CS;
21869 load_sp0(tss, &current->thread);
21870 @@ -208,6 +212,13 @@ int sys_vm86old(struct pt_regs *regs)
21871 struct task_struct *tsk;
21872 int tmp, ret = -EPERM;
21873
21874 +#ifdef CONFIG_GRKERNSEC_VM86
21875 + if (!capable(CAP_SYS_RAWIO)) {
21876 + gr_handle_vm86();
21877 + goto out;
21878 + }
21879 +#endif
21880 +
21881 tsk = current;
21882 if (tsk->thread.saved_sp0)
21883 goto out;
21884 @@ -238,6 +249,14 @@ int sys_vm86(struct pt_regs *regs)
21885 int tmp, ret;
21886 struct vm86plus_struct __user *v86;
21887
21888 +#ifdef CONFIG_GRKERNSEC_VM86
21889 + if (!capable(CAP_SYS_RAWIO)) {
21890 + gr_handle_vm86();
21891 + ret = -EPERM;
21892 + goto out;
21893 + }
21894 +#endif
21895 +
21896 tsk = current;
21897 switch (regs->bx) {
21898 case VM86_REQUEST_IRQ:
21899 @@ -324,7 +343,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
21900 tsk->thread.saved_fs = info->regs32->fs;
21901 tsk->thread.saved_gs = get_user_gs(info->regs32);
21902
21903 - tss = &per_cpu(init_tss, get_cpu());
21904 + tss = init_tss + get_cpu();
21905 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
21906 if (cpu_has_sep)
21907 tsk->thread.sysenter_cs = 0;
21908 @@ -529,7 +548,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
21909 goto cannot_handle;
21910 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
21911 goto cannot_handle;
21912 - intr_ptr = (unsigned long __user *) (i << 2);
21913 + intr_ptr = (__force unsigned long __user *) (i << 2);
21914 if (get_user(segoffs, intr_ptr))
21915 goto cannot_handle;
21916 if ((segoffs >> 16) == BIOSSEG)
21917 diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
21918 index d430e4c..831f817 100644
21919 --- a/arch/x86/kernel/vmi_32.c
21920 +++ b/arch/x86/kernel/vmi_32.c
21921 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
21922 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
21923
21924 #define call_vrom_func(rom,func) \
21925 - (((VROMFUNC *)(rom->func))())
21926 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
21927
21928 #define call_vrom_long_func(rom,func,arg) \
21929 - (((VROMLONGFUNC *)(rom->func)) (arg))
21930 +({\
21931 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
21932 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
21933 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
21934 + __reloc;\
21935 +})
21936
21937 -static struct vrom_header *vmi_rom;
21938 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
21939 static int disable_pge;
21940 static int disable_pse;
21941 static int disable_sep;
21942 @@ -76,10 +81,10 @@ static struct {
21943 void (*set_initial_ap_state)(int, int);
21944 void (*halt)(void);
21945 void (*set_lazy_mode)(int mode);
21946 -} vmi_ops;
21947 +} __no_const vmi_ops __read_only;
21948
21949 /* Cached VMI operations */
21950 -struct vmi_timer_ops vmi_timer_ops;
21951 +struct vmi_timer_ops vmi_timer_ops __read_only;
21952
21953 /*
21954 * VMI patching routines.
21955 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
21956 static inline void patch_offset(void *insnbuf,
21957 unsigned long ip, unsigned long dest)
21958 {
21959 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
21960 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
21961 }
21962
21963 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
21964 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
21965 {
21966 u64 reloc;
21967 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
21968 +
21969 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
21970 switch(rel->type) {
21971 case VMI_RELOCATION_CALL_REL:
21972 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
21973
21974 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
21975 {
21976 - const pte_t pte = { .pte = 0 };
21977 + const pte_t pte = __pte(0ULL);
21978 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
21979 }
21980
21981 static void vmi_pmd_clear(pmd_t *pmd)
21982 {
21983 - const pte_t pte = { .pte = 0 };
21984 + const pte_t pte = __pte(0ULL);
21985 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
21986 }
21987 #endif
21988 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
21989 ap.ss = __KERNEL_DS;
21990 ap.esp = (unsigned long) start_esp;
21991
21992 - ap.ds = __USER_DS;
21993 - ap.es = __USER_DS;
21994 + ap.ds = __KERNEL_DS;
21995 + ap.es = __KERNEL_DS;
21996 ap.fs = __KERNEL_PERCPU;
21997 - ap.gs = __KERNEL_STACK_CANARY;
21998 + savesegment(gs, ap.gs);
21999
22000 ap.eflags = 0;
22001
22002 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
22003 paravirt_leave_lazy_mmu();
22004 }
22005
22006 +#ifdef CONFIG_PAX_KERNEXEC
22007 +static unsigned long vmi_pax_open_kernel(void)
22008 +{
22009 + return 0;
22010 +}
22011 +
22012 +static unsigned long vmi_pax_close_kernel(void)
22013 +{
22014 + return 0;
22015 +}
22016 +#endif
22017 +
22018 static inline int __init check_vmi_rom(struct vrom_header *rom)
22019 {
22020 struct pci_header *pci;
22021 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
22022 return 0;
22023 if (rom->vrom_signature != VMI_SIGNATURE)
22024 return 0;
22025 + if (rom->rom_length * 512 > sizeof(*rom)) {
22026 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
22027 + return 0;
22028 + }
22029 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
22030 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
22031 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
22032 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
22033 struct vrom_header *romstart;
22034 romstart = (struct vrom_header *)isa_bus_to_virt(base);
22035 if (check_vmi_rom(romstart)) {
22036 - vmi_rom = romstart;
22037 + vmi_rom = *romstart;
22038 return 1;
22039 }
22040 }
22041 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
22042
22043 para_fill(pv_irq_ops.safe_halt, Halt);
22044
22045 +#ifdef CONFIG_PAX_KERNEXEC
22046 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
22047 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
22048 +#endif
22049 +
22050 /*
22051 * Alternative instruction rewriting doesn't happen soon enough
22052 * to convert VMI_IRET to a call instead of a jump; so we have
22053 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
22054
22055 void __init vmi_init(void)
22056 {
22057 - if (!vmi_rom)
22058 + if (!vmi_rom.rom_signature)
22059 probe_vmi_rom();
22060 else
22061 - check_vmi_rom(vmi_rom);
22062 + check_vmi_rom(&vmi_rom);
22063
22064 /* In case probing for or validating the ROM failed, basil */
22065 - if (!vmi_rom)
22066 + if (!vmi_rom.rom_signature)
22067 return;
22068
22069 - reserve_top_address(-vmi_rom->virtual_top);
22070 + reserve_top_address(-vmi_rom.virtual_top);
22071
22072 #ifdef CONFIG_X86_IO_APIC
22073 /* This is virtual hardware; timer routing is wired correctly */
22074 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
22075 {
22076 unsigned long flags;
22077
22078 - if (!vmi_rom)
22079 + if (!vmi_rom.rom_signature)
22080 return;
22081
22082 local_irq_save(flags);
22083 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
22084 index 3c68fe2..12c8280 100644
22085 --- a/arch/x86/kernel/vmlinux.lds.S
22086 +++ b/arch/x86/kernel/vmlinux.lds.S
22087 @@ -26,6 +26,13 @@
22088 #include <asm/page_types.h>
22089 #include <asm/cache.h>
22090 #include <asm/boot.h>
22091 +#include <asm/segment.h>
22092 +
22093 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22094 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
22095 +#else
22096 +#define __KERNEL_TEXT_OFFSET 0
22097 +#endif
22098
22099 #undef i386 /* in case the preprocessor is a 32bit one */
22100
22101 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
22102 #ifdef CONFIG_X86_32
22103 OUTPUT_ARCH(i386)
22104 ENTRY(phys_startup_32)
22105 -jiffies = jiffies_64;
22106 #else
22107 OUTPUT_ARCH(i386:x86-64)
22108 ENTRY(phys_startup_64)
22109 -jiffies_64 = jiffies;
22110 #endif
22111
22112 PHDRS {
22113 text PT_LOAD FLAGS(5); /* R_E */
22114 - data PT_LOAD FLAGS(7); /* RWE */
22115 +#ifdef CONFIG_X86_32
22116 + module PT_LOAD FLAGS(5); /* R_E */
22117 +#endif
22118 +#ifdef CONFIG_XEN
22119 + rodata PT_LOAD FLAGS(5); /* R_E */
22120 +#else
22121 + rodata PT_LOAD FLAGS(4); /* R__ */
22122 +#endif
22123 + data PT_LOAD FLAGS(6); /* RW_ */
22124 #ifdef CONFIG_X86_64
22125 user PT_LOAD FLAGS(5); /* R_E */
22126 +#endif
22127 + init.begin PT_LOAD FLAGS(6); /* RW_ */
22128 #ifdef CONFIG_SMP
22129 percpu PT_LOAD FLAGS(6); /* RW_ */
22130 #endif
22131 + text.init PT_LOAD FLAGS(5); /* R_E */
22132 + text.exit PT_LOAD FLAGS(5); /* R_E */
22133 init PT_LOAD FLAGS(7); /* RWE */
22134 -#endif
22135 note PT_NOTE FLAGS(0); /* ___ */
22136 }
22137
22138 SECTIONS
22139 {
22140 #ifdef CONFIG_X86_32
22141 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
22142 - phys_startup_32 = startup_32 - LOAD_OFFSET;
22143 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
22144 #else
22145 - . = __START_KERNEL;
22146 - phys_startup_64 = startup_64 - LOAD_OFFSET;
22147 + . = __START_KERNEL;
22148 #endif
22149
22150 /* Text and read-only data */
22151 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
22152 - _text = .;
22153 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
22154 /* bootstrapping code */
22155 +#ifdef CONFIG_X86_32
22156 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
22157 +#else
22158 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
22159 +#endif
22160 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
22161 + _text = .;
22162 HEAD_TEXT
22163 #ifdef CONFIG_X86_32
22164 . = ALIGN(PAGE_SIZE);
22165 @@ -82,28 +102,71 @@ SECTIONS
22166 IRQENTRY_TEXT
22167 *(.fixup)
22168 *(.gnu.warning)
22169 - /* End of text section */
22170 - _etext = .;
22171 } :text = 0x9090
22172
22173 - NOTES :text :note
22174 + . += __KERNEL_TEXT_OFFSET;
22175
22176 - EXCEPTION_TABLE(16) :text = 0x9090
22177 +#ifdef CONFIG_X86_32
22178 + . = ALIGN(PAGE_SIZE);
22179 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
22180 + *(.vmi.rom)
22181 + } :module
22182 +
22183 + . = ALIGN(PAGE_SIZE);
22184 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
22185 +
22186 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
22187 + MODULES_EXEC_VADDR = .;
22188 + BYTE(0)
22189 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
22190 + . = ALIGN(HPAGE_SIZE);
22191 + MODULES_EXEC_END = . - 1;
22192 +#endif
22193 +
22194 + } :module
22195 +#endif
22196 +
22197 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
22198 + /* End of text section */
22199 + _etext = . - __KERNEL_TEXT_OFFSET;
22200 + }
22201 +
22202 +#ifdef CONFIG_X86_32
22203 + . = ALIGN(PAGE_SIZE);
22204 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
22205 + *(.idt)
22206 + . = ALIGN(PAGE_SIZE);
22207 + *(.empty_zero_page)
22208 + *(.swapper_pg_fixmap)
22209 + *(.swapper_pg_pmd)
22210 + *(.swapper_pg_dir)
22211 + *(.trampoline_pg_dir)
22212 + } :rodata
22213 +#endif
22214 +
22215 + . = ALIGN(PAGE_SIZE);
22216 + NOTES :rodata :note
22217 +
22218 + EXCEPTION_TABLE(16) :rodata
22219
22220 RO_DATA(PAGE_SIZE)
22221
22222 /* Data */
22223 .data : AT(ADDR(.data) - LOAD_OFFSET) {
22224 +
22225 +#ifdef CONFIG_PAX_KERNEXEC
22226 + . = ALIGN(HPAGE_SIZE);
22227 +#else
22228 + . = ALIGN(PAGE_SIZE);
22229 +#endif
22230 +
22231 /* Start of data section */
22232 _sdata = .;
22233
22234 /* init_task */
22235 INIT_TASK_DATA(THREAD_SIZE)
22236
22237 -#ifdef CONFIG_X86_32
22238 - /* 32 bit has nosave before _edata */
22239 NOSAVE_DATA
22240 -#endif
22241
22242 PAGE_ALIGNED_DATA(PAGE_SIZE)
22243
22244 @@ -112,6 +175,8 @@ SECTIONS
22245 DATA_DATA
22246 CONSTRUCTORS
22247
22248 + jiffies = jiffies_64;
22249 +
22250 /* rarely changed data like cpu maps */
22251 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
22252
22253 @@ -166,12 +231,6 @@ SECTIONS
22254 }
22255 vgetcpu_mode = VVIRT(.vgetcpu_mode);
22256
22257 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
22258 - .jiffies : AT(VLOAD(.jiffies)) {
22259 - *(.jiffies)
22260 - }
22261 - jiffies = VVIRT(.jiffies);
22262 -
22263 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
22264 *(.vsyscall_3)
22265 }
22266 @@ -187,12 +246,19 @@ SECTIONS
22267 #endif /* CONFIG_X86_64 */
22268
22269 /* Init code and data - will be freed after init */
22270 - . = ALIGN(PAGE_SIZE);
22271 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
22272 + BYTE(0)
22273 +
22274 +#ifdef CONFIG_PAX_KERNEXEC
22275 + . = ALIGN(HPAGE_SIZE);
22276 +#else
22277 + . = ALIGN(PAGE_SIZE);
22278 +#endif
22279 +
22280 __init_begin = .; /* paired with __init_end */
22281 - }
22282 + } :init.begin
22283
22284 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
22285 +#ifdef CONFIG_SMP
22286 /*
22287 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
22288 * output PHDR, so the next output section - .init.text - should
22289 @@ -201,12 +267,27 @@ SECTIONS
22290 PERCPU_VADDR(0, :percpu)
22291 #endif
22292
22293 - INIT_TEXT_SECTION(PAGE_SIZE)
22294 -#ifdef CONFIG_X86_64
22295 - :init
22296 -#endif
22297 + . = ALIGN(PAGE_SIZE);
22298 + init_begin = .;
22299 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
22300 + VMLINUX_SYMBOL(_sinittext) = .;
22301 + INIT_TEXT
22302 + VMLINUX_SYMBOL(_einittext) = .;
22303 + . = ALIGN(PAGE_SIZE);
22304 + } :text.init
22305
22306 - INIT_DATA_SECTION(16)
22307 + /*
22308 + * .exit.text is discard at runtime, not link time, to deal with
22309 + * references from .altinstructions and .eh_frame
22310 + */
22311 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
22312 + EXIT_TEXT
22313 + . = ALIGN(16);
22314 + } :text.exit
22315 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
22316 +
22317 + . = ALIGN(PAGE_SIZE);
22318 + INIT_DATA_SECTION(16) :init
22319
22320 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
22321 __x86_cpu_dev_start = .;
22322 @@ -232,19 +313,11 @@ SECTIONS
22323 *(.altinstr_replacement)
22324 }
22325
22326 - /*
22327 - * .exit.text is discard at runtime, not link time, to deal with
22328 - * references from .altinstructions and .eh_frame
22329 - */
22330 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
22331 - EXIT_TEXT
22332 - }
22333 -
22334 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
22335 EXIT_DATA
22336 }
22337
22338 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
22339 +#ifndef CONFIG_SMP
22340 PERCPU(PAGE_SIZE)
22341 #endif
22342
22343 @@ -267,12 +340,6 @@ SECTIONS
22344 . = ALIGN(PAGE_SIZE);
22345 }
22346
22347 -#ifdef CONFIG_X86_64
22348 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
22349 - NOSAVE_DATA
22350 - }
22351 -#endif
22352 -
22353 /* BSS */
22354 . = ALIGN(PAGE_SIZE);
22355 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
22356 @@ -288,6 +355,7 @@ SECTIONS
22357 __brk_base = .;
22358 . += 64 * 1024; /* 64k alignment slop space */
22359 *(.brk_reservation) /* areas brk users have reserved */
22360 + . = ALIGN(HPAGE_SIZE);
22361 __brk_limit = .;
22362 }
22363
22364 @@ -316,13 +384,12 @@ SECTIONS
22365 * for the boot processor.
22366 */
22367 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
22368 -INIT_PER_CPU(gdt_page);
22369 INIT_PER_CPU(irq_stack_union);
22370
22371 /*
22372 * Build-time check on the image size:
22373 */
22374 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
22375 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
22376 "kernel image bigger than KERNEL_IMAGE_SIZE");
22377
22378 #ifdef CONFIG_SMP
22379 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
22380 index 62f39d7..3bc46a1 100644
22381 --- a/arch/x86/kernel/vsyscall_64.c
22382 +++ b/arch/x86/kernel/vsyscall_64.c
22383 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
22384
22385 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
22386 /* copy vsyscall data */
22387 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
22388 vsyscall_gtod_data.clock.vread = clock->vread;
22389 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
22390 vsyscall_gtod_data.clock.mask = clock->mask;
22391 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
22392 We do this here because otherwise user space would do it on
22393 its own in a likely inferior way (no access to jiffies).
22394 If you don't like it pass NULL. */
22395 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
22396 + if (tcache && tcache->blob[0] == (j = jiffies)) {
22397 p = tcache->blob[1];
22398 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
22399 /* Load per CPU data from RDTSCP */
22400 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
22401 index 3909e3b..5433a97 100644
22402 --- a/arch/x86/kernel/x8664_ksyms_64.c
22403 +++ b/arch/x86/kernel/x8664_ksyms_64.c
22404 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
22405
22406 EXPORT_SYMBOL(copy_user_generic);
22407 EXPORT_SYMBOL(__copy_user_nocache);
22408 -EXPORT_SYMBOL(copy_from_user);
22409 -EXPORT_SYMBOL(copy_to_user);
22410 EXPORT_SYMBOL(__copy_from_user_inatomic);
22411
22412 EXPORT_SYMBOL(copy_page);
22413 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
22414 index c5ee17e..d63218f 100644
22415 --- a/arch/x86/kernel/xsave.c
22416 +++ b/arch/x86/kernel/xsave.c
22417 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
22418 fx_sw_user->xstate_size > fx_sw_user->extended_size)
22419 return -1;
22420
22421 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
22422 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
22423 fx_sw_user->extended_size -
22424 FP_XSTATE_MAGIC2_SIZE));
22425 /*
22426 @@ -196,7 +196,7 @@ fx_only:
22427 * the other extended state.
22428 */
22429 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
22430 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
22431 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
22432 }
22433
22434 /*
22435 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
22436 if (task_thread_info(tsk)->status & TS_XSAVE)
22437 err = restore_user_xstate(buf);
22438 else
22439 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
22440 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
22441 buf);
22442 if (unlikely(err)) {
22443 /*
22444 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
22445 index 1350e43..a94b011 100644
22446 --- a/arch/x86/kvm/emulate.c
22447 +++ b/arch/x86/kvm/emulate.c
22448 @@ -81,8 +81,8 @@
22449 #define Src2CL (1<<29)
22450 #define Src2ImmByte (2<<29)
22451 #define Src2One (3<<29)
22452 -#define Src2Imm16 (4<<29)
22453 -#define Src2Mask (7<<29)
22454 +#define Src2Imm16 (4U<<29)
22455 +#define Src2Mask (7U<<29)
22456
22457 enum {
22458 Group1_80, Group1_81, Group1_82, Group1_83,
22459 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
22460
22461 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
22462 do { \
22463 + unsigned long _tmp; \
22464 __asm__ __volatile__ ( \
22465 _PRE_EFLAGS("0", "4", "2") \
22466 _op _suffix " %"_x"3,%1; " \
22467 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
22468 /* Raw emulation: instruction has two explicit operands. */
22469 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
22470 do { \
22471 - unsigned long _tmp; \
22472 - \
22473 switch ((_dst).bytes) { \
22474 case 2: \
22475 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
22476 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
22477
22478 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
22479 do { \
22480 - unsigned long _tmp; \
22481 switch ((_dst).bytes) { \
22482 case 1: \
22483 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
22484 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
22485 index 8dfeaaa..4daa395 100644
22486 --- a/arch/x86/kvm/lapic.c
22487 +++ b/arch/x86/kvm/lapic.c
22488 @@ -52,7 +52,7 @@
22489 #define APIC_BUS_CYCLE_NS 1
22490
22491 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
22492 -#define apic_debug(fmt, arg...)
22493 +#define apic_debug(fmt, arg...) do {} while (0)
22494
22495 #define APIC_LVT_NUM 6
22496 /* 14 is the version for Xeon and Pentium 8.4.8*/
22497 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
22498 index 3bc2707..dd157e2 100644
22499 --- a/arch/x86/kvm/paging_tmpl.h
22500 +++ b/arch/x86/kvm/paging_tmpl.h
22501 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
22502 int level = PT_PAGE_TABLE_LEVEL;
22503 unsigned long mmu_seq;
22504
22505 + pax_track_stack();
22506 +
22507 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
22508 kvm_mmu_audit(vcpu, "pre page fault");
22509
22510 @@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
22511 kvm_mmu_free_some_pages(vcpu);
22512 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
22513 level, &write_pt, pfn);
22514 + (void)sptep;
22515 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
22516 sptep, *sptep, write_pt);
22517
22518 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
22519 index 7c6e63e..1b7dac1 100644
22520 --- a/arch/x86/kvm/svm.c
22521 +++ b/arch/x86/kvm/svm.c
22522 @@ -2240,6 +2240,7 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
22523 return 1;
22524 }
22525
22526 +static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) __size_overflow(3);
22527 static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
22528 {
22529 struct vcpu_svm *svm = to_svm(vcpu);
22530 @@ -2486,7 +2487,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
22531 int cpu = raw_smp_processor_id();
22532
22533 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
22534 +
22535 + pax_open_kernel();
22536 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
22537 + pax_close_kernel();
22538 +
22539 load_TR_desc();
22540 }
22541
22542 @@ -2947,7 +2952,7 @@ static bool svm_gb_page_enable(void)
22543 return true;
22544 }
22545
22546 -static struct kvm_x86_ops svm_x86_ops = {
22547 +static const struct kvm_x86_ops svm_x86_ops = {
22548 .cpu_has_kvm_support = has_svm,
22549 .disabled_by_bios = is_disabled,
22550 .hardware_setup = svm_hardware_setup,
22551 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
22552 index e6d925f..8cdd779 100644
22553 --- a/arch/x86/kvm/vmx.c
22554 +++ b/arch/x86/kvm/vmx.c
22555 @@ -570,7 +570,11 @@ static void reload_tss(void)
22556
22557 kvm_get_gdt(&gdt);
22558 descs = (void *)gdt.base;
22559 +
22560 + pax_open_kernel();
22561 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
22562 + pax_close_kernel();
22563 +
22564 load_TR_desc();
22565 }
22566
22567 @@ -1035,6 +1039,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
22568 * Returns 0 on success, non-0 otherwise.
22569 * Assumes vcpu_load() was already called.
22570 */
22571 +static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) __size_overflow(3);
22572 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
22573 {
22574 struct vcpu_vmx *vmx = to_vmx(vcpu);
22575 @@ -1410,8 +1415,11 @@ static __init int hardware_setup(void)
22576 if (!cpu_has_vmx_flexpriority())
22577 flexpriority_enabled = 0;
22578
22579 - if (!cpu_has_vmx_tpr_shadow())
22580 - kvm_x86_ops->update_cr8_intercept = NULL;
22581 + if (!cpu_has_vmx_tpr_shadow()) {
22582 + pax_open_kernel();
22583 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
22584 + pax_close_kernel();
22585 + }
22586
22587 if (enable_ept && !cpu_has_vmx_ept_2m_page())
22588 kvm_disable_largepages();
22589 @@ -2362,7 +2370,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
22590 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
22591
22592 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
22593 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
22594 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
22595 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
22596 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
22597 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
22598 @@ -3718,6 +3726,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
22599 "jmp .Lkvm_vmx_return \n\t"
22600 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
22601 ".Lkvm_vmx_return: "
22602 +
22603 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22604 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
22605 + ".Lkvm_vmx_return2: "
22606 +#endif
22607 +
22608 /* Save guest registers, load host registers, keep flags */
22609 "xchg %0, (%%"R"sp) \n\t"
22610 "mov %%"R"ax, %c[rax](%0) \n\t"
22611 @@ -3764,8 +3778,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
22612 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
22613 #endif
22614 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
22615 +
22616 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22617 + ,[cs]"i"(__KERNEL_CS)
22618 +#endif
22619 +
22620 : "cc", "memory"
22621 - , R"bx", R"di", R"si"
22622 + , R"ax", R"bx", R"di", R"si"
22623 #ifdef CONFIG_X86_64
22624 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
22625 #endif
22626 @@ -3782,7 +3801,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
22627 if (vmx->rmode.irq.pending)
22628 fixup_rmode_irq(vmx);
22629
22630 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
22631 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
22632 +
22633 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22634 + loadsegment(fs, __KERNEL_PERCPU);
22635 +#endif
22636 +
22637 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22638 + __set_fs(current_thread_info()->addr_limit);
22639 +#endif
22640 +
22641 vmx->launched = 1;
22642
22643 vmx_complete_interrupts(vmx);
22644 @@ -3957,7 +3985,7 @@ static bool vmx_gb_page_enable(void)
22645 return false;
22646 }
22647
22648 -static struct kvm_x86_ops vmx_x86_ops = {
22649 +static const struct kvm_x86_ops vmx_x86_ops = {
22650 .cpu_has_kvm_support = cpu_has_kvm_support,
22651 .disabled_by_bios = vmx_disabled_by_bios,
22652 .hardware_setup = hardware_setup,
22653 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
22654 index df1cefb..ff86cc2 100644
22655 --- a/arch/x86/kvm/x86.c
22656 +++ b/arch/x86/kvm/x86.c
22657 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
22658 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
22659 struct kvm_cpuid_entry2 __user *entries);
22660
22661 -struct kvm_x86_ops *kvm_x86_ops;
22662 +const struct kvm_x86_ops *kvm_x86_ops;
22663 EXPORT_SYMBOL_GPL(kvm_x86_ops);
22664
22665 int ignore_msrs = 0;
22666 @@ -547,6 +547,7 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
22667 return kvm_set_msr(vcpu, index, *data);
22668 }
22669
22670 +static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) __size_overflow(2);
22671 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
22672 {
22673 int version;
22674 @@ -1430,15 +1431,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
22675 struct kvm_cpuid2 *cpuid,
22676 struct kvm_cpuid_entry2 __user *entries)
22677 {
22678 - int r;
22679 + int r, i;
22680
22681 r = -E2BIG;
22682 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
22683 goto out;
22684 r = -EFAULT;
22685 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
22686 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
22687 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
22688 goto out;
22689 + for (i = 0; i < cpuid->nent; ++i) {
22690 + struct kvm_cpuid_entry2 cpuid_entry;
22691 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
22692 + goto out;
22693 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
22694 + }
22695 vcpu->arch.cpuid_nent = cpuid->nent;
22696 kvm_apic_set_version(vcpu);
22697 return 0;
22698 @@ -1451,16 +1457,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
22699 struct kvm_cpuid2 *cpuid,
22700 struct kvm_cpuid_entry2 __user *entries)
22701 {
22702 - int r;
22703 + int r, i;
22704
22705 vcpu_load(vcpu);
22706 r = -E2BIG;
22707 if (cpuid->nent < vcpu->arch.cpuid_nent)
22708 goto out;
22709 r = -EFAULT;
22710 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
22711 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
22712 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
22713 goto out;
22714 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
22715 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
22716 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
22717 + goto out;
22718 + }
22719 return 0;
22720
22721 out:
22722 @@ -1678,7 +1688,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
22723 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
22724 struct kvm_interrupt *irq)
22725 {
22726 - if (irq->irq < 0 || irq->irq >= 256)
22727 + if (irq->irq >= 256)
22728 return -EINVAL;
22729 if (irqchip_in_kernel(vcpu->kvm))
22730 return -ENXIO;
22731 @@ -2764,7 +2774,14 @@ int emulator_write_emulated(unsigned long addr,
22732 }
22733 EXPORT_SYMBOL_GPL(emulator_write_emulated);
22734
22735 -static int emulator_cmpxchg_emulated(unsigned long addr,
22736 +static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
22737 + unsigned long addr,
22738 + const void *old,
22739 + const void *new,
22740 + unsigned int bytes,
22741 + struct kvm_vcpu *vcpu) __size_overflow(5);
22742 +static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
22743 + unsigned long addr,
22744 const void *old,
22745 const void *new,
22746 unsigned int bytes,
22747 @@ -3260,10 +3277,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
22748 .notifier_call = kvmclock_cpufreq_notifier
22749 };
22750
22751 -int kvm_arch_init(void *opaque)
22752 +int kvm_arch_init(const void *opaque)
22753 {
22754 int r, cpu;
22755 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
22756 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
22757
22758 if (kvm_x86_ops) {
22759 printk(KERN_ERR "kvm: already loaded the other module\n");
22760 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
22761 index 7e59dc1..b88c98f 100644
22762 --- a/arch/x86/lguest/boot.c
22763 +++ b/arch/x86/lguest/boot.c
22764 @@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
22765 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
22766 * Launcher to reboot us.
22767 */
22768 -static void lguest_restart(char *reason)
22769 +static __noreturn void lguest_restart(char *reason)
22770 {
22771 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
22772 + BUG();
22773 }
22774
22775 /*G:050
22776 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
22777 index 824fa0b..c619e96 100644
22778 --- a/arch/x86/lib/atomic64_32.c
22779 +++ b/arch/x86/lib/atomic64_32.c
22780 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
22781 }
22782 EXPORT_SYMBOL(atomic64_cmpxchg);
22783
22784 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
22785 +{
22786 + return cmpxchg8b(&ptr->counter, old_val, new_val);
22787 +}
22788 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
22789 +
22790 /**
22791 * atomic64_xchg - xchg atomic64 variable
22792 * @ptr: pointer to type atomic64_t
22793 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
22794 EXPORT_SYMBOL(atomic64_xchg);
22795
22796 /**
22797 + * atomic64_xchg_unchecked - xchg atomic64 variable
22798 + * @ptr: pointer to type atomic64_unchecked_t
22799 + * @new_val: value to assign
22800 + *
22801 + * Atomically xchgs the value of @ptr to @new_val and returns
22802 + * the old value.
22803 + */
22804 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
22805 +{
22806 + /*
22807 + * Try first with a (possibly incorrect) assumption about
22808 + * what we have there. We'll do two loops most likely,
22809 + * but we'll get an ownership MESI transaction straight away
22810 + * instead of a read transaction followed by a
22811 + * flush-for-ownership transaction:
22812 + */
22813 + u64 old_val, real_val = 0;
22814 +
22815 + do {
22816 + old_val = real_val;
22817 +
22818 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
22819 +
22820 + } while (real_val != old_val);
22821 +
22822 + return old_val;
22823 +}
22824 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
22825 +
22826 +/**
22827 * atomic64_set - set atomic64 variable
22828 * @ptr: pointer to type atomic64_t
22829 * @new_val: value to assign
22830 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
22831 EXPORT_SYMBOL(atomic64_set);
22832
22833 /**
22834 -EXPORT_SYMBOL(atomic64_read);
22835 + * atomic64_unchecked_set - set atomic64 variable
22836 + * @ptr: pointer to type atomic64_unchecked_t
22837 + * @new_val: value to assign
22838 + *
22839 + * Atomically sets the value of @ptr to @new_val.
22840 + */
22841 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
22842 +{
22843 + atomic64_xchg_unchecked(ptr, new_val);
22844 +}
22845 +EXPORT_SYMBOL(atomic64_set_unchecked);
22846 +
22847 +/**
22848 * atomic64_add_return - add and return
22849 * @delta: integer value to add
22850 * @ptr: pointer to type atomic64_t
22851 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
22852 }
22853 EXPORT_SYMBOL(atomic64_add_return);
22854
22855 +/**
22856 + * atomic64_add_return_unchecked - add and return
22857 + * @delta: integer value to add
22858 + * @ptr: pointer to type atomic64_unchecked_t
22859 + *
22860 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
22861 + */
22862 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22863 +{
22864 + /*
22865 + * Try first with a (possibly incorrect) assumption about
22866 + * what we have there. We'll do two loops most likely,
22867 + * but we'll get an ownership MESI transaction straight away
22868 + * instead of a read transaction followed by a
22869 + * flush-for-ownership transaction:
22870 + */
22871 + u64 old_val, new_val, real_val = 0;
22872 +
22873 + do {
22874 + old_val = real_val;
22875 + new_val = old_val + delta;
22876 +
22877 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
22878 +
22879 + } while (real_val != old_val);
22880 +
22881 + return new_val;
22882 +}
22883 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
22884 +
22885 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
22886 {
22887 return atomic64_add_return(-delta, ptr);
22888 }
22889 EXPORT_SYMBOL(atomic64_sub_return);
22890
22891 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22892 +{
22893 + return atomic64_add_return_unchecked(-delta, ptr);
22894 +}
22895 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
22896 +
22897 u64 atomic64_inc_return(atomic64_t *ptr)
22898 {
22899 return atomic64_add_return(1, ptr);
22900 }
22901 EXPORT_SYMBOL(atomic64_inc_return);
22902
22903 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
22904 +{
22905 + return atomic64_add_return_unchecked(1, ptr);
22906 +}
22907 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
22908 +
22909 u64 atomic64_dec_return(atomic64_t *ptr)
22910 {
22911 return atomic64_sub_return(1, ptr);
22912 }
22913 EXPORT_SYMBOL(atomic64_dec_return);
22914
22915 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
22916 +{
22917 + return atomic64_sub_return_unchecked(1, ptr);
22918 +}
22919 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
22920 +
22921 /**
22922 * atomic64_add - add integer to atomic64 variable
22923 * @delta: integer value to add
22924 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
22925 EXPORT_SYMBOL(atomic64_add);
22926
22927 /**
22928 + * atomic64_add_unchecked - add integer to atomic64 variable
22929 + * @delta: integer value to add
22930 + * @ptr: pointer to type atomic64_unchecked_t
22931 + *
22932 + * Atomically adds @delta to @ptr.
22933 + */
22934 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22935 +{
22936 + atomic64_add_return_unchecked(delta, ptr);
22937 +}
22938 +EXPORT_SYMBOL(atomic64_add_unchecked);
22939 +
22940 +/**
22941 * atomic64_sub - subtract the atomic64 variable
22942 * @delta: integer value to subtract
22943 * @ptr: pointer to type atomic64_t
22944 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
22945 EXPORT_SYMBOL(atomic64_sub);
22946
22947 /**
22948 + * atomic64_sub_unchecked - subtract the atomic64 variable
22949 + * @delta: integer value to subtract
22950 + * @ptr: pointer to type atomic64_unchecked_t
22951 + *
22952 + * Atomically subtracts @delta from @ptr.
22953 + */
22954 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22955 +{
22956 + atomic64_add_unchecked(-delta, ptr);
22957 +}
22958 +EXPORT_SYMBOL(atomic64_sub_unchecked);
22959 +
22960 +/**
22961 * atomic64_sub_and_test - subtract value from variable and test result
22962 * @delta: integer value to subtract
22963 * @ptr: pointer to type atomic64_t
22964 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
22965 EXPORT_SYMBOL(atomic64_inc);
22966
22967 /**
22968 + * atomic64_inc_unchecked - increment atomic64 variable
22969 + * @ptr: pointer to type atomic64_unchecked_t
22970 + *
22971 + * Atomically increments @ptr by 1.
22972 + */
22973 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
22974 +{
22975 + atomic64_add_unchecked(1, ptr);
22976 +}
22977 +EXPORT_SYMBOL(atomic64_inc_unchecked);
22978 +
22979 +/**
22980 * atomic64_dec - decrement atomic64 variable
22981 * @ptr: pointer to type atomic64_t
22982 *
22983 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
22984 EXPORT_SYMBOL(atomic64_dec);
22985
22986 /**
22987 + * atomic64_dec_unchecked - decrement atomic64 variable
22988 + * @ptr: pointer to type atomic64_unchecked_t
22989 + *
22990 + * Atomically decrements @ptr by 1.
22991 + */
22992 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
22993 +{
22994 + atomic64_sub_unchecked(1, ptr);
22995 +}
22996 +EXPORT_SYMBOL(atomic64_dec_unchecked);
22997 +
22998 +/**
22999 * atomic64_dec_and_test - decrement and test
23000 * @ptr: pointer to type atomic64_t
23001 *
23002 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
23003 index adbccd0..98f96c8 100644
23004 --- a/arch/x86/lib/checksum_32.S
23005 +++ b/arch/x86/lib/checksum_32.S
23006 @@ -28,7 +28,8 @@
23007 #include <linux/linkage.h>
23008 #include <asm/dwarf2.h>
23009 #include <asm/errno.h>
23010 -
23011 +#include <asm/segment.h>
23012 +
23013 /*
23014 * computes a partial checksum, e.g. for TCP/UDP fragments
23015 */
23016 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
23017
23018 #define ARGBASE 16
23019 #define FP 12
23020 -
23021 -ENTRY(csum_partial_copy_generic)
23022 +
23023 +ENTRY(csum_partial_copy_generic_to_user)
23024 CFI_STARTPROC
23025 +
23026 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23027 + pushl %gs
23028 + CFI_ADJUST_CFA_OFFSET 4
23029 + popl %es
23030 + CFI_ADJUST_CFA_OFFSET -4
23031 + jmp csum_partial_copy_generic
23032 +#endif
23033 +
23034 +ENTRY(csum_partial_copy_generic_from_user)
23035 +
23036 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23037 + pushl %gs
23038 + CFI_ADJUST_CFA_OFFSET 4
23039 + popl %ds
23040 + CFI_ADJUST_CFA_OFFSET -4
23041 +#endif
23042 +
23043 +ENTRY(csum_partial_copy_generic)
23044 subl $4,%esp
23045 CFI_ADJUST_CFA_OFFSET 4
23046 pushl %edi
23047 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
23048 jmp 4f
23049 SRC(1: movw (%esi), %bx )
23050 addl $2, %esi
23051 -DST( movw %bx, (%edi) )
23052 +DST( movw %bx, %es:(%edi) )
23053 addl $2, %edi
23054 addw %bx, %ax
23055 adcl $0, %eax
23056 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
23057 SRC(1: movl (%esi), %ebx )
23058 SRC( movl 4(%esi), %edx )
23059 adcl %ebx, %eax
23060 -DST( movl %ebx, (%edi) )
23061 +DST( movl %ebx, %es:(%edi) )
23062 adcl %edx, %eax
23063 -DST( movl %edx, 4(%edi) )
23064 +DST( movl %edx, %es:4(%edi) )
23065
23066 SRC( movl 8(%esi), %ebx )
23067 SRC( movl 12(%esi), %edx )
23068 adcl %ebx, %eax
23069 -DST( movl %ebx, 8(%edi) )
23070 +DST( movl %ebx, %es:8(%edi) )
23071 adcl %edx, %eax
23072 -DST( movl %edx, 12(%edi) )
23073 +DST( movl %edx, %es:12(%edi) )
23074
23075 SRC( movl 16(%esi), %ebx )
23076 SRC( movl 20(%esi), %edx )
23077 adcl %ebx, %eax
23078 -DST( movl %ebx, 16(%edi) )
23079 +DST( movl %ebx, %es:16(%edi) )
23080 adcl %edx, %eax
23081 -DST( movl %edx, 20(%edi) )
23082 +DST( movl %edx, %es:20(%edi) )
23083
23084 SRC( movl 24(%esi), %ebx )
23085 SRC( movl 28(%esi), %edx )
23086 adcl %ebx, %eax
23087 -DST( movl %ebx, 24(%edi) )
23088 +DST( movl %ebx, %es:24(%edi) )
23089 adcl %edx, %eax
23090 -DST( movl %edx, 28(%edi) )
23091 +DST( movl %edx, %es:28(%edi) )
23092
23093 lea 32(%esi), %esi
23094 lea 32(%edi), %edi
23095 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
23096 shrl $2, %edx # This clears CF
23097 SRC(3: movl (%esi), %ebx )
23098 adcl %ebx, %eax
23099 -DST( movl %ebx, (%edi) )
23100 +DST( movl %ebx, %es:(%edi) )
23101 lea 4(%esi), %esi
23102 lea 4(%edi), %edi
23103 dec %edx
23104 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
23105 jb 5f
23106 SRC( movw (%esi), %cx )
23107 leal 2(%esi), %esi
23108 -DST( movw %cx, (%edi) )
23109 +DST( movw %cx, %es:(%edi) )
23110 leal 2(%edi), %edi
23111 je 6f
23112 shll $16,%ecx
23113 SRC(5: movb (%esi), %cl )
23114 -DST( movb %cl, (%edi) )
23115 +DST( movb %cl, %es:(%edi) )
23116 6: addl %ecx, %eax
23117 adcl $0, %eax
23118 7:
23119 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
23120
23121 6001:
23122 movl ARGBASE+20(%esp), %ebx # src_err_ptr
23123 - movl $-EFAULT, (%ebx)
23124 + movl $-EFAULT, %ss:(%ebx)
23125
23126 # zero the complete destination - computing the rest
23127 # is too much work
23128 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
23129
23130 6002:
23131 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
23132 - movl $-EFAULT,(%ebx)
23133 + movl $-EFAULT,%ss:(%ebx)
23134 jmp 5000b
23135
23136 .previous
23137
23138 + pushl %ss
23139 + CFI_ADJUST_CFA_OFFSET 4
23140 + popl %ds
23141 + CFI_ADJUST_CFA_OFFSET -4
23142 + pushl %ss
23143 + CFI_ADJUST_CFA_OFFSET 4
23144 + popl %es
23145 + CFI_ADJUST_CFA_OFFSET -4
23146 popl %ebx
23147 CFI_ADJUST_CFA_OFFSET -4
23148 CFI_RESTORE ebx
23149 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
23150 CFI_ADJUST_CFA_OFFSET -4
23151 ret
23152 CFI_ENDPROC
23153 -ENDPROC(csum_partial_copy_generic)
23154 +ENDPROC(csum_partial_copy_generic_to_user)
23155
23156 #else
23157
23158 /* Version for PentiumII/PPro */
23159
23160 #define ROUND1(x) \
23161 + nop; nop; nop; \
23162 SRC(movl x(%esi), %ebx ) ; \
23163 addl %ebx, %eax ; \
23164 - DST(movl %ebx, x(%edi) ) ;
23165 + DST(movl %ebx, %es:x(%edi)) ;
23166
23167 #define ROUND(x) \
23168 + nop; nop; nop; \
23169 SRC(movl x(%esi), %ebx ) ; \
23170 adcl %ebx, %eax ; \
23171 - DST(movl %ebx, x(%edi) ) ;
23172 + DST(movl %ebx, %es:x(%edi)) ;
23173
23174 #define ARGBASE 12
23175 -
23176 -ENTRY(csum_partial_copy_generic)
23177 +
23178 +ENTRY(csum_partial_copy_generic_to_user)
23179 CFI_STARTPROC
23180 +
23181 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23182 + pushl %gs
23183 + CFI_ADJUST_CFA_OFFSET 4
23184 + popl %es
23185 + CFI_ADJUST_CFA_OFFSET -4
23186 + jmp csum_partial_copy_generic
23187 +#endif
23188 +
23189 +ENTRY(csum_partial_copy_generic_from_user)
23190 +
23191 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23192 + pushl %gs
23193 + CFI_ADJUST_CFA_OFFSET 4
23194 + popl %ds
23195 + CFI_ADJUST_CFA_OFFSET -4
23196 +#endif
23197 +
23198 +ENTRY(csum_partial_copy_generic)
23199 pushl %ebx
23200 CFI_ADJUST_CFA_OFFSET 4
23201 CFI_REL_OFFSET ebx, 0
23202 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
23203 subl %ebx, %edi
23204 lea -1(%esi),%edx
23205 andl $-32,%edx
23206 - lea 3f(%ebx,%ebx), %ebx
23207 + lea 3f(%ebx,%ebx,2), %ebx
23208 testl %esi, %esi
23209 jmp *%ebx
23210 1: addl $64,%esi
23211 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
23212 jb 5f
23213 SRC( movw (%esi), %dx )
23214 leal 2(%esi), %esi
23215 -DST( movw %dx, (%edi) )
23216 +DST( movw %dx, %es:(%edi) )
23217 leal 2(%edi), %edi
23218 je 6f
23219 shll $16,%edx
23220 5:
23221 SRC( movb (%esi), %dl )
23222 -DST( movb %dl, (%edi) )
23223 +DST( movb %dl, %es:(%edi) )
23224 6: addl %edx, %eax
23225 adcl $0, %eax
23226 7:
23227 .section .fixup, "ax"
23228 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
23229 - movl $-EFAULT, (%ebx)
23230 + movl $-EFAULT, %ss:(%ebx)
23231 # zero the complete destination (computing the rest is too much work)
23232 movl ARGBASE+8(%esp),%edi # dst
23233 movl ARGBASE+12(%esp),%ecx # len
23234 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
23235 rep; stosb
23236 jmp 7b
23237 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
23238 - movl $-EFAULT, (%ebx)
23239 + movl $-EFAULT, %ss:(%ebx)
23240 jmp 7b
23241 .previous
23242
23243 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23244 + pushl %ss
23245 + CFI_ADJUST_CFA_OFFSET 4
23246 + popl %ds
23247 + CFI_ADJUST_CFA_OFFSET -4
23248 + pushl %ss
23249 + CFI_ADJUST_CFA_OFFSET 4
23250 + popl %es
23251 + CFI_ADJUST_CFA_OFFSET -4
23252 +#endif
23253 +
23254 popl %esi
23255 CFI_ADJUST_CFA_OFFSET -4
23256 CFI_RESTORE esi
23257 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
23258 CFI_RESTORE ebx
23259 ret
23260 CFI_ENDPROC
23261 -ENDPROC(csum_partial_copy_generic)
23262 +ENDPROC(csum_partial_copy_generic_to_user)
23263
23264 #undef ROUND
23265 #undef ROUND1
23266 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
23267 index ebeafcc..1e3a402 100644
23268 --- a/arch/x86/lib/clear_page_64.S
23269 +++ b/arch/x86/lib/clear_page_64.S
23270 @@ -1,5 +1,6 @@
23271 #include <linux/linkage.h>
23272 #include <asm/dwarf2.h>
23273 +#include <asm/alternative-asm.h>
23274
23275 /*
23276 * Zero a page.
23277 @@ -10,6 +11,7 @@ ENTRY(clear_page_c)
23278 movl $4096/8,%ecx
23279 xorl %eax,%eax
23280 rep stosq
23281 + pax_force_retaddr
23282 ret
23283 CFI_ENDPROC
23284 ENDPROC(clear_page_c)
23285 @@ -33,6 +35,7 @@ ENTRY(clear_page)
23286 leaq 64(%rdi),%rdi
23287 jnz .Lloop
23288 nop
23289 + pax_force_retaddr
23290 ret
23291 CFI_ENDPROC
23292 .Lclear_page_end:
23293 @@ -43,7 +46,7 @@ ENDPROC(clear_page)
23294
23295 #include <asm/cpufeature.h>
23296
23297 - .section .altinstr_replacement,"ax"
23298 + .section .altinstr_replacement,"a"
23299 1: .byte 0xeb /* jmp <disp8> */
23300 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
23301 2:
23302 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
23303 index 727a5d4..333818a 100644
23304 --- a/arch/x86/lib/copy_page_64.S
23305 +++ b/arch/x86/lib/copy_page_64.S
23306 @@ -2,12 +2,14 @@
23307
23308 #include <linux/linkage.h>
23309 #include <asm/dwarf2.h>
23310 +#include <asm/alternative-asm.h>
23311
23312 ALIGN
23313 copy_page_c:
23314 CFI_STARTPROC
23315 movl $4096/8,%ecx
23316 rep movsq
23317 + pax_force_retaddr
23318 ret
23319 CFI_ENDPROC
23320 ENDPROC(copy_page_c)
23321 @@ -38,7 +40,7 @@ ENTRY(copy_page)
23322 movq 16 (%rsi), %rdx
23323 movq 24 (%rsi), %r8
23324 movq 32 (%rsi), %r9
23325 - movq 40 (%rsi), %r10
23326 + movq 40 (%rsi), %r13
23327 movq 48 (%rsi), %r11
23328 movq 56 (%rsi), %r12
23329
23330 @@ -49,7 +51,7 @@ ENTRY(copy_page)
23331 movq %rdx, 16 (%rdi)
23332 movq %r8, 24 (%rdi)
23333 movq %r9, 32 (%rdi)
23334 - movq %r10, 40 (%rdi)
23335 + movq %r13, 40 (%rdi)
23336 movq %r11, 48 (%rdi)
23337 movq %r12, 56 (%rdi)
23338
23339 @@ -68,7 +70,7 @@ ENTRY(copy_page)
23340 movq 16 (%rsi), %rdx
23341 movq 24 (%rsi), %r8
23342 movq 32 (%rsi), %r9
23343 - movq 40 (%rsi), %r10
23344 + movq 40 (%rsi), %r13
23345 movq 48 (%rsi), %r11
23346 movq 56 (%rsi), %r12
23347
23348 @@ -77,7 +79,7 @@ ENTRY(copy_page)
23349 movq %rdx, 16 (%rdi)
23350 movq %r8, 24 (%rdi)
23351 movq %r9, 32 (%rdi)
23352 - movq %r10, 40 (%rdi)
23353 + movq %r13, 40 (%rdi)
23354 movq %r11, 48 (%rdi)
23355 movq %r12, 56 (%rdi)
23356
23357 @@ -94,6 +96,7 @@ ENTRY(copy_page)
23358 CFI_RESTORE r13
23359 addq $3*8,%rsp
23360 CFI_ADJUST_CFA_OFFSET -3*8
23361 + pax_force_retaddr
23362 ret
23363 .Lcopy_page_end:
23364 CFI_ENDPROC
23365 @@ -104,7 +107,7 @@ ENDPROC(copy_page)
23366
23367 #include <asm/cpufeature.h>
23368
23369 - .section .altinstr_replacement,"ax"
23370 + .section .altinstr_replacement,"a"
23371 1: .byte 0xeb /* jmp <disp8> */
23372 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
23373 2:
23374 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
23375 index af8debd..40c75f3 100644
23376 --- a/arch/x86/lib/copy_user_64.S
23377 +++ b/arch/x86/lib/copy_user_64.S
23378 @@ -15,13 +15,15 @@
23379 #include <asm/asm-offsets.h>
23380 #include <asm/thread_info.h>
23381 #include <asm/cpufeature.h>
23382 +#include <asm/pgtable.h>
23383 +#include <asm/alternative-asm.h>
23384
23385 .macro ALTERNATIVE_JUMP feature,orig,alt
23386 0:
23387 .byte 0xe9 /* 32bit jump */
23388 .long \orig-1f /* by default jump to orig */
23389 1:
23390 - .section .altinstr_replacement,"ax"
23391 + .section .altinstr_replacement,"a"
23392 2: .byte 0xe9 /* near jump with 32bit immediate */
23393 .long \alt-1b /* offset */ /* or alternatively to alt */
23394 .previous
23395 @@ -64,55 +66,26 @@
23396 #endif
23397 .endm
23398
23399 -/* Standard copy_to_user with segment limit checking */
23400 -ENTRY(copy_to_user)
23401 - CFI_STARTPROC
23402 - GET_THREAD_INFO(%rax)
23403 - movq %rdi,%rcx
23404 - addq %rdx,%rcx
23405 - jc bad_to_user
23406 - cmpq TI_addr_limit(%rax),%rcx
23407 - ja bad_to_user
23408 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23409 - CFI_ENDPROC
23410 -ENDPROC(copy_to_user)
23411 -
23412 -/* Standard copy_from_user with segment limit checking */
23413 -ENTRY(copy_from_user)
23414 - CFI_STARTPROC
23415 - GET_THREAD_INFO(%rax)
23416 - movq %rsi,%rcx
23417 - addq %rdx,%rcx
23418 - jc bad_from_user
23419 - cmpq TI_addr_limit(%rax),%rcx
23420 - ja bad_from_user
23421 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23422 - CFI_ENDPROC
23423 -ENDPROC(copy_from_user)
23424 -
23425 ENTRY(copy_user_generic)
23426 CFI_STARTPROC
23427 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23428 CFI_ENDPROC
23429 ENDPROC(copy_user_generic)
23430
23431 -ENTRY(__copy_from_user_inatomic)
23432 - CFI_STARTPROC
23433 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23434 - CFI_ENDPROC
23435 -ENDPROC(__copy_from_user_inatomic)
23436 -
23437 .section .fixup,"ax"
23438 /* must zero dest */
23439 ENTRY(bad_from_user)
23440 bad_from_user:
23441 CFI_STARTPROC
23442 + testl %edx,%edx
23443 + js bad_to_user
23444 movl %edx,%ecx
23445 xorl %eax,%eax
23446 rep
23447 stosb
23448 bad_to_user:
23449 movl %edx,%eax
23450 + pax_force_retaddr
23451 ret
23452 CFI_ENDPROC
23453 ENDPROC(bad_from_user)
23454 @@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
23455 jz 17f
23456 1: movq (%rsi),%r8
23457 2: movq 1*8(%rsi),%r9
23458 -3: movq 2*8(%rsi),%r10
23459 +3: movq 2*8(%rsi),%rax
23460 4: movq 3*8(%rsi),%r11
23461 5: movq %r8,(%rdi)
23462 6: movq %r9,1*8(%rdi)
23463 -7: movq %r10,2*8(%rdi)
23464 +7: movq %rax,2*8(%rdi)
23465 8: movq %r11,3*8(%rdi)
23466 9: movq 4*8(%rsi),%r8
23467 10: movq 5*8(%rsi),%r9
23468 -11: movq 6*8(%rsi),%r10
23469 +11: movq 6*8(%rsi),%rax
23470 12: movq 7*8(%rsi),%r11
23471 13: movq %r8,4*8(%rdi)
23472 14: movq %r9,5*8(%rdi)
23473 -15: movq %r10,6*8(%rdi)
23474 +15: movq %rax,6*8(%rdi)
23475 16: movq %r11,7*8(%rdi)
23476 leaq 64(%rsi),%rsi
23477 leaq 64(%rdi),%rdi
23478 @@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
23479 decl %ecx
23480 jnz 21b
23481 23: xor %eax,%eax
23482 + pax_force_retaddr
23483 ret
23484
23485 .section .fixup,"ax"
23486 @@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
23487 3: rep
23488 movsb
23489 4: xorl %eax,%eax
23490 + pax_force_retaddr
23491 ret
23492
23493 .section .fixup,"ax"
23494 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
23495 index cb0c112..e3a6895 100644
23496 --- a/arch/x86/lib/copy_user_nocache_64.S
23497 +++ b/arch/x86/lib/copy_user_nocache_64.S
23498 @@ -8,12 +8,14 @@
23499
23500 #include <linux/linkage.h>
23501 #include <asm/dwarf2.h>
23502 +#include <asm/alternative-asm.h>
23503
23504 #define FIX_ALIGNMENT 1
23505
23506 #include <asm/current.h>
23507 #include <asm/asm-offsets.h>
23508 #include <asm/thread_info.h>
23509 +#include <asm/pgtable.h>
23510
23511 .macro ALIGN_DESTINATION
23512 #ifdef FIX_ALIGNMENT
23513 @@ -50,6 +52,15 @@
23514 */
23515 ENTRY(__copy_user_nocache)
23516 CFI_STARTPROC
23517 +
23518 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23519 + mov $PAX_USER_SHADOW_BASE,%rcx
23520 + cmp %rcx,%rsi
23521 + jae 1f
23522 + add %rcx,%rsi
23523 +1:
23524 +#endif
23525 +
23526 cmpl $8,%edx
23527 jb 20f /* less then 8 bytes, go to byte copy loop */
23528 ALIGN_DESTINATION
23529 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
23530 jz 17f
23531 1: movq (%rsi),%r8
23532 2: movq 1*8(%rsi),%r9
23533 -3: movq 2*8(%rsi),%r10
23534 +3: movq 2*8(%rsi),%rax
23535 4: movq 3*8(%rsi),%r11
23536 5: movnti %r8,(%rdi)
23537 6: movnti %r9,1*8(%rdi)
23538 -7: movnti %r10,2*8(%rdi)
23539 +7: movnti %rax,2*8(%rdi)
23540 8: movnti %r11,3*8(%rdi)
23541 9: movq 4*8(%rsi),%r8
23542 10: movq 5*8(%rsi),%r9
23543 -11: movq 6*8(%rsi),%r10
23544 +11: movq 6*8(%rsi),%rax
23545 12: movq 7*8(%rsi),%r11
23546 13: movnti %r8,4*8(%rdi)
23547 14: movnti %r9,5*8(%rdi)
23548 -15: movnti %r10,6*8(%rdi)
23549 +15: movnti %rax,6*8(%rdi)
23550 16: movnti %r11,7*8(%rdi)
23551 leaq 64(%rsi),%rsi
23552 leaq 64(%rdi),%rdi
23553 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
23554 jnz 21b
23555 23: xorl %eax,%eax
23556 sfence
23557 + pax_force_retaddr
23558 ret
23559
23560 .section .fixup,"ax"
23561 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
23562 index f0dba36..48cb4d6 100644
23563 --- a/arch/x86/lib/csum-copy_64.S
23564 +++ b/arch/x86/lib/csum-copy_64.S
23565 @@ -8,6 +8,7 @@
23566 #include <linux/linkage.h>
23567 #include <asm/dwarf2.h>
23568 #include <asm/errno.h>
23569 +#include <asm/alternative-asm.h>
23570
23571 /*
23572 * Checksum copy with exception handling.
23573 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
23574 CFI_RESTORE rbp
23575 addq $7*8,%rsp
23576 CFI_ADJUST_CFA_OFFSET -7*8
23577 + pax_force_retaddr 0, 1
23578 ret
23579 CFI_RESTORE_STATE
23580
23581 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
23582 index 459b58a..9570bc7 100644
23583 --- a/arch/x86/lib/csum-wrappers_64.c
23584 +++ b/arch/x86/lib/csum-wrappers_64.c
23585 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
23586 len -= 2;
23587 }
23588 }
23589 - isum = csum_partial_copy_generic((__force const void *)src,
23590 +
23591 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23592 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23593 + src += PAX_USER_SHADOW_BASE;
23594 +#endif
23595 +
23596 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
23597 dst, len, isum, errp, NULL);
23598 if (unlikely(*errp))
23599 goto out_err;
23600 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
23601 }
23602
23603 *errp = 0;
23604 - return csum_partial_copy_generic(src, (void __force *)dst,
23605 +
23606 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23607 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
23608 + dst += PAX_USER_SHADOW_BASE;
23609 +#endif
23610 +
23611 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
23612 len, isum, NULL, errp);
23613 }
23614 EXPORT_SYMBOL(csum_partial_copy_to_user);
23615 diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
23616 index ff485d3..b6372ce 100644
23617 --- a/arch/x86/lib/delay.c
23618 +++ b/arch/x86/lib/delay.c
23619 @@ -48,9 +48,9 @@ static void delay_loop(unsigned long loops)
23620 }
23621
23622 /* TSC based delay: */
23623 -static void delay_tsc(unsigned long loops)
23624 +static void delay_tsc(unsigned long __loops)
23625 {
23626 - unsigned long bclock, now;
23627 + u32 bclock, now, loops = __loops;
23628 int cpu;
23629
23630 preempt_disable();
23631 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
23632 index 51f1504..ddac4c1 100644
23633 --- a/arch/x86/lib/getuser.S
23634 +++ b/arch/x86/lib/getuser.S
23635 @@ -33,15 +33,38 @@
23636 #include <asm/asm-offsets.h>
23637 #include <asm/thread_info.h>
23638 #include <asm/asm.h>
23639 +#include <asm/segment.h>
23640 +#include <asm/pgtable.h>
23641 +#include <asm/alternative-asm.h>
23642 +
23643 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23644 +#define __copyuser_seg gs;
23645 +#else
23646 +#define __copyuser_seg
23647 +#endif
23648
23649 .text
23650 ENTRY(__get_user_1)
23651 CFI_STARTPROC
23652 +
23653 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23654 GET_THREAD_INFO(%_ASM_DX)
23655 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23656 jae bad_get_user
23657 -1: movzb (%_ASM_AX),%edx
23658 +
23659 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23660 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23661 + cmp %_ASM_DX,%_ASM_AX
23662 + jae 1234f
23663 + add %_ASM_DX,%_ASM_AX
23664 +1234:
23665 +#endif
23666 +
23667 +#endif
23668 +
23669 +1: __copyuser_seg movzb (%_ASM_AX),%edx
23670 xor %eax,%eax
23671 + pax_force_retaddr
23672 ret
23673 CFI_ENDPROC
23674 ENDPROC(__get_user_1)
23675 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
23676 ENTRY(__get_user_2)
23677 CFI_STARTPROC
23678 add $1,%_ASM_AX
23679 +
23680 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23681 jc bad_get_user
23682 GET_THREAD_INFO(%_ASM_DX)
23683 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23684 jae bad_get_user
23685 -2: movzwl -1(%_ASM_AX),%edx
23686 +
23687 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23688 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23689 + cmp %_ASM_DX,%_ASM_AX
23690 + jae 1234f
23691 + add %_ASM_DX,%_ASM_AX
23692 +1234:
23693 +#endif
23694 +
23695 +#endif
23696 +
23697 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
23698 xor %eax,%eax
23699 + pax_force_retaddr
23700 ret
23701 CFI_ENDPROC
23702 ENDPROC(__get_user_2)
23703 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
23704 ENTRY(__get_user_4)
23705 CFI_STARTPROC
23706 add $3,%_ASM_AX
23707 +
23708 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23709 jc bad_get_user
23710 GET_THREAD_INFO(%_ASM_DX)
23711 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23712 jae bad_get_user
23713 -3: mov -3(%_ASM_AX),%edx
23714 +
23715 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23716 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23717 + cmp %_ASM_DX,%_ASM_AX
23718 + jae 1234f
23719 + add %_ASM_DX,%_ASM_AX
23720 +1234:
23721 +#endif
23722 +
23723 +#endif
23724 +
23725 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
23726 xor %eax,%eax
23727 + pax_force_retaddr
23728 ret
23729 CFI_ENDPROC
23730 ENDPROC(__get_user_4)
23731 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
23732 GET_THREAD_INFO(%_ASM_DX)
23733 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23734 jae bad_get_user
23735 +
23736 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23737 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23738 + cmp %_ASM_DX,%_ASM_AX
23739 + jae 1234f
23740 + add %_ASM_DX,%_ASM_AX
23741 +1234:
23742 +#endif
23743 +
23744 4: movq -7(%_ASM_AX),%_ASM_DX
23745 xor %eax,%eax
23746 + pax_force_retaddr
23747 ret
23748 CFI_ENDPROC
23749 ENDPROC(__get_user_8)
23750 @@ -91,6 +152,7 @@ bad_get_user:
23751 CFI_STARTPROC
23752 xor %edx,%edx
23753 mov $(-EFAULT),%_ASM_AX
23754 + pax_force_retaddr
23755 ret
23756 CFI_ENDPROC
23757 END(bad_get_user)
23758 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
23759 index 05a95e7..326f2fa 100644
23760 --- a/arch/x86/lib/iomap_copy_64.S
23761 +++ b/arch/x86/lib/iomap_copy_64.S
23762 @@ -17,6 +17,7 @@
23763
23764 #include <linux/linkage.h>
23765 #include <asm/dwarf2.h>
23766 +#include <asm/alternative-asm.h>
23767
23768 /*
23769 * override generic version in lib/iomap_copy.c
23770 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
23771 CFI_STARTPROC
23772 movl %edx,%ecx
23773 rep movsd
23774 + pax_force_retaddr
23775 ret
23776 CFI_ENDPROC
23777 ENDPROC(__iowrite32_copy)
23778 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
23779 index ad5441e..610e351 100644
23780 --- a/arch/x86/lib/memcpy_64.S
23781 +++ b/arch/x86/lib/memcpy_64.S
23782 @@ -4,6 +4,7 @@
23783
23784 #include <asm/cpufeature.h>
23785 #include <asm/dwarf2.h>
23786 +#include <asm/alternative-asm.h>
23787
23788 /*
23789 * memcpy - Copy a memory block.
23790 @@ -34,6 +35,7 @@ memcpy_c:
23791 rep movsq
23792 movl %edx, %ecx
23793 rep movsb
23794 + pax_force_retaddr
23795 ret
23796 CFI_ENDPROC
23797 ENDPROC(memcpy_c)
23798 @@ -118,6 +120,7 @@ ENTRY(memcpy)
23799 jnz .Lloop_1
23800
23801 .Lend:
23802 + pax_force_retaddr 0, 1
23803 ret
23804 CFI_ENDPROC
23805 ENDPROC(memcpy)
23806 @@ -128,7 +131,7 @@ ENDPROC(__memcpy)
23807 * It is also a lot simpler. Use this when possible:
23808 */
23809
23810 - .section .altinstr_replacement, "ax"
23811 + .section .altinstr_replacement, "a"
23812 1: .byte 0xeb /* jmp <disp8> */
23813 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
23814 2:
23815 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
23816 index 2c59481..7e9ba4e 100644
23817 --- a/arch/x86/lib/memset_64.S
23818 +++ b/arch/x86/lib/memset_64.S
23819 @@ -2,6 +2,7 @@
23820
23821 #include <linux/linkage.h>
23822 #include <asm/dwarf2.h>
23823 +#include <asm/alternative-asm.h>
23824
23825 /*
23826 * ISO C memset - set a memory block to a byte value.
23827 @@ -28,6 +29,7 @@ memset_c:
23828 movl %r8d,%ecx
23829 rep stosb
23830 movq %r9,%rax
23831 + pax_force_retaddr
23832 ret
23833 CFI_ENDPROC
23834 ENDPROC(memset_c)
23835 @@ -35,13 +37,13 @@ ENDPROC(memset_c)
23836 ENTRY(memset)
23837 ENTRY(__memset)
23838 CFI_STARTPROC
23839 - movq %rdi,%r10
23840 movq %rdx,%r11
23841
23842 /* expand byte value */
23843 movzbl %sil,%ecx
23844 movabs $0x0101010101010101,%rax
23845 mul %rcx /* with rax, clobbers rdx */
23846 + movq %rdi,%rdx
23847
23848 /* align dst */
23849 movl %edi,%r9d
23850 @@ -95,7 +97,8 @@ ENTRY(__memset)
23851 jnz .Lloop_1
23852
23853 .Lende:
23854 - movq %r10,%rax
23855 + movq %rdx,%rax
23856 + pax_force_retaddr
23857 ret
23858
23859 CFI_RESTORE_STATE
23860 @@ -118,7 +121,7 @@ ENDPROC(__memset)
23861
23862 #include <asm/cpufeature.h>
23863
23864 - .section .altinstr_replacement,"ax"
23865 + .section .altinstr_replacement,"a"
23866 1: .byte 0xeb /* jmp <disp8> */
23867 .byte (memset_c - memset) - (2f - 1b) /* offset */
23868 2:
23869 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
23870 index c9f2d9b..e7fd2c0 100644
23871 --- a/arch/x86/lib/mmx_32.c
23872 +++ b/arch/x86/lib/mmx_32.c
23873 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23874 {
23875 void *p;
23876 int i;
23877 + unsigned long cr0;
23878
23879 if (unlikely(in_interrupt()))
23880 return __memcpy(to, from, len);
23881 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23882 kernel_fpu_begin();
23883
23884 __asm__ __volatile__ (
23885 - "1: prefetch (%0)\n" /* This set is 28 bytes */
23886 - " prefetch 64(%0)\n"
23887 - " prefetch 128(%0)\n"
23888 - " prefetch 192(%0)\n"
23889 - " prefetch 256(%0)\n"
23890 + "1: prefetch (%1)\n" /* This set is 28 bytes */
23891 + " prefetch 64(%1)\n"
23892 + " prefetch 128(%1)\n"
23893 + " prefetch 192(%1)\n"
23894 + " prefetch 256(%1)\n"
23895 "2: \n"
23896 ".section .fixup, \"ax\"\n"
23897 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23898 + "3: \n"
23899 +
23900 +#ifdef CONFIG_PAX_KERNEXEC
23901 + " movl %%cr0, %0\n"
23902 + " movl %0, %%eax\n"
23903 + " andl $0xFFFEFFFF, %%eax\n"
23904 + " movl %%eax, %%cr0\n"
23905 +#endif
23906 +
23907 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23908 +
23909 +#ifdef CONFIG_PAX_KERNEXEC
23910 + " movl %0, %%cr0\n"
23911 +#endif
23912 +
23913 " jmp 2b\n"
23914 ".previous\n"
23915 _ASM_EXTABLE(1b, 3b)
23916 - : : "r" (from));
23917 + : "=&r" (cr0) : "r" (from) : "ax");
23918
23919 for ( ; i > 5; i--) {
23920 __asm__ __volatile__ (
23921 - "1: prefetch 320(%0)\n"
23922 - "2: movq (%0), %%mm0\n"
23923 - " movq 8(%0), %%mm1\n"
23924 - " movq 16(%0), %%mm2\n"
23925 - " movq 24(%0), %%mm3\n"
23926 - " movq %%mm0, (%1)\n"
23927 - " movq %%mm1, 8(%1)\n"
23928 - " movq %%mm2, 16(%1)\n"
23929 - " movq %%mm3, 24(%1)\n"
23930 - " movq 32(%0), %%mm0\n"
23931 - " movq 40(%0), %%mm1\n"
23932 - " movq 48(%0), %%mm2\n"
23933 - " movq 56(%0), %%mm3\n"
23934 - " movq %%mm0, 32(%1)\n"
23935 - " movq %%mm1, 40(%1)\n"
23936 - " movq %%mm2, 48(%1)\n"
23937 - " movq %%mm3, 56(%1)\n"
23938 + "1: prefetch 320(%1)\n"
23939 + "2: movq (%1), %%mm0\n"
23940 + " movq 8(%1), %%mm1\n"
23941 + " movq 16(%1), %%mm2\n"
23942 + " movq 24(%1), %%mm3\n"
23943 + " movq %%mm0, (%2)\n"
23944 + " movq %%mm1, 8(%2)\n"
23945 + " movq %%mm2, 16(%2)\n"
23946 + " movq %%mm3, 24(%2)\n"
23947 + " movq 32(%1), %%mm0\n"
23948 + " movq 40(%1), %%mm1\n"
23949 + " movq 48(%1), %%mm2\n"
23950 + " movq 56(%1), %%mm3\n"
23951 + " movq %%mm0, 32(%2)\n"
23952 + " movq %%mm1, 40(%2)\n"
23953 + " movq %%mm2, 48(%2)\n"
23954 + " movq %%mm3, 56(%2)\n"
23955 ".section .fixup, \"ax\"\n"
23956 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23957 + "3:\n"
23958 +
23959 +#ifdef CONFIG_PAX_KERNEXEC
23960 + " movl %%cr0, %0\n"
23961 + " movl %0, %%eax\n"
23962 + " andl $0xFFFEFFFF, %%eax\n"
23963 + " movl %%eax, %%cr0\n"
23964 +#endif
23965 +
23966 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23967 +
23968 +#ifdef CONFIG_PAX_KERNEXEC
23969 + " movl %0, %%cr0\n"
23970 +#endif
23971 +
23972 " jmp 2b\n"
23973 ".previous\n"
23974 _ASM_EXTABLE(1b, 3b)
23975 - : : "r" (from), "r" (to) : "memory");
23976 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23977
23978 from += 64;
23979 to += 64;
23980 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
23981 static void fast_copy_page(void *to, void *from)
23982 {
23983 int i;
23984 + unsigned long cr0;
23985
23986 kernel_fpu_begin();
23987
23988 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
23989 * but that is for later. -AV
23990 */
23991 __asm__ __volatile__(
23992 - "1: prefetch (%0)\n"
23993 - " prefetch 64(%0)\n"
23994 - " prefetch 128(%0)\n"
23995 - " prefetch 192(%0)\n"
23996 - " prefetch 256(%0)\n"
23997 + "1: prefetch (%1)\n"
23998 + " prefetch 64(%1)\n"
23999 + " prefetch 128(%1)\n"
24000 + " prefetch 192(%1)\n"
24001 + " prefetch 256(%1)\n"
24002 "2: \n"
24003 ".section .fixup, \"ax\"\n"
24004 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
24005 + "3: \n"
24006 +
24007 +#ifdef CONFIG_PAX_KERNEXEC
24008 + " movl %%cr0, %0\n"
24009 + " movl %0, %%eax\n"
24010 + " andl $0xFFFEFFFF, %%eax\n"
24011 + " movl %%eax, %%cr0\n"
24012 +#endif
24013 +
24014 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
24015 +
24016 +#ifdef CONFIG_PAX_KERNEXEC
24017 + " movl %0, %%cr0\n"
24018 +#endif
24019 +
24020 " jmp 2b\n"
24021 ".previous\n"
24022 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
24023 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
24024
24025 for (i = 0; i < (4096-320)/64; i++) {
24026 __asm__ __volatile__ (
24027 - "1: prefetch 320(%0)\n"
24028 - "2: movq (%0), %%mm0\n"
24029 - " movntq %%mm0, (%1)\n"
24030 - " movq 8(%0), %%mm1\n"
24031 - " movntq %%mm1, 8(%1)\n"
24032 - " movq 16(%0), %%mm2\n"
24033 - " movntq %%mm2, 16(%1)\n"
24034 - " movq 24(%0), %%mm3\n"
24035 - " movntq %%mm3, 24(%1)\n"
24036 - " movq 32(%0), %%mm4\n"
24037 - " movntq %%mm4, 32(%1)\n"
24038 - " movq 40(%0), %%mm5\n"
24039 - " movntq %%mm5, 40(%1)\n"
24040 - " movq 48(%0), %%mm6\n"
24041 - " movntq %%mm6, 48(%1)\n"
24042 - " movq 56(%0), %%mm7\n"
24043 - " movntq %%mm7, 56(%1)\n"
24044 + "1: prefetch 320(%1)\n"
24045 + "2: movq (%1), %%mm0\n"
24046 + " movntq %%mm0, (%2)\n"
24047 + " movq 8(%1), %%mm1\n"
24048 + " movntq %%mm1, 8(%2)\n"
24049 + " movq 16(%1), %%mm2\n"
24050 + " movntq %%mm2, 16(%2)\n"
24051 + " movq 24(%1), %%mm3\n"
24052 + " movntq %%mm3, 24(%2)\n"
24053 + " movq 32(%1), %%mm4\n"
24054 + " movntq %%mm4, 32(%2)\n"
24055 + " movq 40(%1), %%mm5\n"
24056 + " movntq %%mm5, 40(%2)\n"
24057 + " movq 48(%1), %%mm6\n"
24058 + " movntq %%mm6, 48(%2)\n"
24059 + " movq 56(%1), %%mm7\n"
24060 + " movntq %%mm7, 56(%2)\n"
24061 ".section .fixup, \"ax\"\n"
24062 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24063 + "3:\n"
24064 +
24065 +#ifdef CONFIG_PAX_KERNEXEC
24066 + " movl %%cr0, %0\n"
24067 + " movl %0, %%eax\n"
24068 + " andl $0xFFFEFFFF, %%eax\n"
24069 + " movl %%eax, %%cr0\n"
24070 +#endif
24071 +
24072 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24073 +
24074 +#ifdef CONFIG_PAX_KERNEXEC
24075 + " movl %0, %%cr0\n"
24076 +#endif
24077 +
24078 " jmp 2b\n"
24079 ".previous\n"
24080 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
24081 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
24082
24083 from += 64;
24084 to += 64;
24085 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
24086 static void fast_copy_page(void *to, void *from)
24087 {
24088 int i;
24089 + unsigned long cr0;
24090
24091 kernel_fpu_begin();
24092
24093 __asm__ __volatile__ (
24094 - "1: prefetch (%0)\n"
24095 - " prefetch 64(%0)\n"
24096 - " prefetch 128(%0)\n"
24097 - " prefetch 192(%0)\n"
24098 - " prefetch 256(%0)\n"
24099 + "1: prefetch (%1)\n"
24100 + " prefetch 64(%1)\n"
24101 + " prefetch 128(%1)\n"
24102 + " prefetch 192(%1)\n"
24103 + " prefetch 256(%1)\n"
24104 "2: \n"
24105 ".section .fixup, \"ax\"\n"
24106 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
24107 + "3: \n"
24108 +
24109 +#ifdef CONFIG_PAX_KERNEXEC
24110 + " movl %%cr0, %0\n"
24111 + " movl %0, %%eax\n"
24112 + " andl $0xFFFEFFFF, %%eax\n"
24113 + " movl %%eax, %%cr0\n"
24114 +#endif
24115 +
24116 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
24117 +
24118 +#ifdef CONFIG_PAX_KERNEXEC
24119 + " movl %0, %%cr0\n"
24120 +#endif
24121 +
24122 " jmp 2b\n"
24123 ".previous\n"
24124 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
24125 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
24126
24127 for (i = 0; i < 4096/64; i++) {
24128 __asm__ __volatile__ (
24129 - "1: prefetch 320(%0)\n"
24130 - "2: movq (%0), %%mm0\n"
24131 - " movq 8(%0), %%mm1\n"
24132 - " movq 16(%0), %%mm2\n"
24133 - " movq 24(%0), %%mm3\n"
24134 - " movq %%mm0, (%1)\n"
24135 - " movq %%mm1, 8(%1)\n"
24136 - " movq %%mm2, 16(%1)\n"
24137 - " movq %%mm3, 24(%1)\n"
24138 - " movq 32(%0), %%mm0\n"
24139 - " movq 40(%0), %%mm1\n"
24140 - " movq 48(%0), %%mm2\n"
24141 - " movq 56(%0), %%mm3\n"
24142 - " movq %%mm0, 32(%1)\n"
24143 - " movq %%mm1, 40(%1)\n"
24144 - " movq %%mm2, 48(%1)\n"
24145 - " movq %%mm3, 56(%1)\n"
24146 + "1: prefetch 320(%1)\n"
24147 + "2: movq (%1), %%mm0\n"
24148 + " movq 8(%1), %%mm1\n"
24149 + " movq 16(%1), %%mm2\n"
24150 + " movq 24(%1), %%mm3\n"
24151 + " movq %%mm0, (%2)\n"
24152 + " movq %%mm1, 8(%2)\n"
24153 + " movq %%mm2, 16(%2)\n"
24154 + " movq %%mm3, 24(%2)\n"
24155 + " movq 32(%1), %%mm0\n"
24156 + " movq 40(%1), %%mm1\n"
24157 + " movq 48(%1), %%mm2\n"
24158 + " movq 56(%1), %%mm3\n"
24159 + " movq %%mm0, 32(%2)\n"
24160 + " movq %%mm1, 40(%2)\n"
24161 + " movq %%mm2, 48(%2)\n"
24162 + " movq %%mm3, 56(%2)\n"
24163 ".section .fixup, \"ax\"\n"
24164 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24165 + "3:\n"
24166 +
24167 +#ifdef CONFIG_PAX_KERNEXEC
24168 + " movl %%cr0, %0\n"
24169 + " movl %0, %%eax\n"
24170 + " andl $0xFFFEFFFF, %%eax\n"
24171 + " movl %%eax, %%cr0\n"
24172 +#endif
24173 +
24174 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24175 +
24176 +#ifdef CONFIG_PAX_KERNEXEC
24177 + " movl %0, %%cr0\n"
24178 +#endif
24179 +
24180 " jmp 2b\n"
24181 ".previous\n"
24182 _ASM_EXTABLE(1b, 3b)
24183 - : : "r" (from), "r" (to) : "memory");
24184 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
24185
24186 from += 64;
24187 to += 64;
24188 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
24189 index 69fa106..adda88b 100644
24190 --- a/arch/x86/lib/msr-reg.S
24191 +++ b/arch/x86/lib/msr-reg.S
24192 @@ -3,6 +3,7 @@
24193 #include <asm/dwarf2.h>
24194 #include <asm/asm.h>
24195 #include <asm/msr.h>
24196 +#include <asm/alternative-asm.h>
24197
24198 #ifdef CONFIG_X86_64
24199 /*
24200 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
24201 CFI_STARTPROC
24202 pushq_cfi %rbx
24203 pushq_cfi %rbp
24204 - movq %rdi, %r10 /* Save pointer */
24205 + movq %rdi, %r9 /* Save pointer */
24206 xorl %r11d, %r11d /* Return value */
24207 movl (%rdi), %eax
24208 movl 4(%rdi), %ecx
24209 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
24210 movl 28(%rdi), %edi
24211 CFI_REMEMBER_STATE
24212 1: \op
24213 -2: movl %eax, (%r10)
24214 +2: movl %eax, (%r9)
24215 movl %r11d, %eax /* Return value */
24216 - movl %ecx, 4(%r10)
24217 - movl %edx, 8(%r10)
24218 - movl %ebx, 12(%r10)
24219 - movl %ebp, 20(%r10)
24220 - movl %esi, 24(%r10)
24221 - movl %edi, 28(%r10)
24222 + movl %ecx, 4(%r9)
24223 + movl %edx, 8(%r9)
24224 + movl %ebx, 12(%r9)
24225 + movl %ebp, 20(%r9)
24226 + movl %esi, 24(%r9)
24227 + movl %edi, 28(%r9)
24228 popq_cfi %rbp
24229 popq_cfi %rbx
24230 + pax_force_retaddr
24231 ret
24232 3:
24233 CFI_RESTORE_STATE
24234 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
24235 index 36b0d15..d381858 100644
24236 --- a/arch/x86/lib/putuser.S
24237 +++ b/arch/x86/lib/putuser.S
24238 @@ -15,7 +15,9 @@
24239 #include <asm/thread_info.h>
24240 #include <asm/errno.h>
24241 #include <asm/asm.h>
24242 -
24243 +#include <asm/segment.h>
24244 +#include <asm/pgtable.h>
24245 +#include <asm/alternative-asm.h>
24246
24247 /*
24248 * __put_user_X
24249 @@ -29,52 +31,119 @@
24250 * as they get called from within inline assembly.
24251 */
24252
24253 -#define ENTER CFI_STARTPROC ; \
24254 - GET_THREAD_INFO(%_ASM_BX)
24255 -#define EXIT ret ; \
24256 +#define ENTER CFI_STARTPROC
24257 +#define EXIT pax_force_retaddr; ret ; \
24258 CFI_ENDPROC
24259
24260 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24261 +#define _DEST %_ASM_CX,%_ASM_BX
24262 +#else
24263 +#define _DEST %_ASM_CX
24264 +#endif
24265 +
24266 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24267 +#define __copyuser_seg gs;
24268 +#else
24269 +#define __copyuser_seg
24270 +#endif
24271 +
24272 .text
24273 ENTRY(__put_user_1)
24274 ENTER
24275 +
24276 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24277 + GET_THREAD_INFO(%_ASM_BX)
24278 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
24279 jae bad_put_user
24280 -1: movb %al,(%_ASM_CX)
24281 +
24282 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24283 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24284 + cmp %_ASM_BX,%_ASM_CX
24285 + jb 1234f
24286 + xor %ebx,%ebx
24287 +1234:
24288 +#endif
24289 +
24290 +#endif
24291 +
24292 +1: __copyuser_seg movb %al,(_DEST)
24293 xor %eax,%eax
24294 EXIT
24295 ENDPROC(__put_user_1)
24296
24297 ENTRY(__put_user_2)
24298 ENTER
24299 +
24300 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24301 + GET_THREAD_INFO(%_ASM_BX)
24302 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24303 sub $1,%_ASM_BX
24304 cmp %_ASM_BX,%_ASM_CX
24305 jae bad_put_user
24306 -2: movw %ax,(%_ASM_CX)
24307 +
24308 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24309 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24310 + cmp %_ASM_BX,%_ASM_CX
24311 + jb 1234f
24312 + xor %ebx,%ebx
24313 +1234:
24314 +#endif
24315 +
24316 +#endif
24317 +
24318 +2: __copyuser_seg movw %ax,(_DEST)
24319 xor %eax,%eax
24320 EXIT
24321 ENDPROC(__put_user_2)
24322
24323 ENTRY(__put_user_4)
24324 ENTER
24325 +
24326 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24327 + GET_THREAD_INFO(%_ASM_BX)
24328 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24329 sub $3,%_ASM_BX
24330 cmp %_ASM_BX,%_ASM_CX
24331 jae bad_put_user
24332 -3: movl %eax,(%_ASM_CX)
24333 +
24334 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24335 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24336 + cmp %_ASM_BX,%_ASM_CX
24337 + jb 1234f
24338 + xor %ebx,%ebx
24339 +1234:
24340 +#endif
24341 +
24342 +#endif
24343 +
24344 +3: __copyuser_seg movl %eax,(_DEST)
24345 xor %eax,%eax
24346 EXIT
24347 ENDPROC(__put_user_4)
24348
24349 ENTRY(__put_user_8)
24350 ENTER
24351 +
24352 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24353 + GET_THREAD_INFO(%_ASM_BX)
24354 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24355 sub $7,%_ASM_BX
24356 cmp %_ASM_BX,%_ASM_CX
24357 jae bad_put_user
24358 -4: mov %_ASM_AX,(%_ASM_CX)
24359 +
24360 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24361 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24362 + cmp %_ASM_BX,%_ASM_CX
24363 + jb 1234f
24364 + xor %ebx,%ebx
24365 +1234:
24366 +#endif
24367 +
24368 +#endif
24369 +
24370 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
24371 #ifdef CONFIG_X86_32
24372 -5: movl %edx,4(%_ASM_CX)
24373 +5: __copyuser_seg movl %edx,4(_DEST)
24374 #endif
24375 xor %eax,%eax
24376 EXIT
24377 diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
24378 index 05ea55f..6345b9a 100644
24379 --- a/arch/x86/lib/rwlock_64.S
24380 +++ b/arch/x86/lib/rwlock_64.S
24381 @@ -2,6 +2,7 @@
24382
24383 #include <linux/linkage.h>
24384 #include <asm/rwlock.h>
24385 +#include <asm/asm.h>
24386 #include <asm/alternative-asm.h>
24387 #include <asm/dwarf2.h>
24388
24389 @@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
24390 CFI_STARTPROC
24391 LOCK_PREFIX
24392 addl $RW_LOCK_BIAS,(%rdi)
24393 +
24394 +#ifdef CONFIG_PAX_REFCOUNT
24395 + jno 1234f
24396 + LOCK_PREFIX
24397 + subl $RW_LOCK_BIAS,(%rdi)
24398 + int $4
24399 +1234:
24400 + _ASM_EXTABLE(1234b, 1234b)
24401 +#endif
24402 +
24403 1: rep
24404 nop
24405 cmpl $RW_LOCK_BIAS,(%rdi)
24406 jne 1b
24407 LOCK_PREFIX
24408 subl $RW_LOCK_BIAS,(%rdi)
24409 +
24410 +#ifdef CONFIG_PAX_REFCOUNT
24411 + jno 1234f
24412 + LOCK_PREFIX
24413 + addl $RW_LOCK_BIAS,(%rdi)
24414 + int $4
24415 +1234:
24416 + _ASM_EXTABLE(1234b, 1234b)
24417 +#endif
24418 +
24419 jnz __write_lock_failed
24420 + pax_force_retaddr
24421 ret
24422 CFI_ENDPROC
24423 END(__write_lock_failed)
24424 @@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
24425 CFI_STARTPROC
24426 LOCK_PREFIX
24427 incl (%rdi)
24428 +
24429 +#ifdef CONFIG_PAX_REFCOUNT
24430 + jno 1234f
24431 + LOCK_PREFIX
24432 + decl (%rdi)
24433 + int $4
24434 +1234:
24435 + _ASM_EXTABLE(1234b, 1234b)
24436 +#endif
24437 +
24438 1: rep
24439 nop
24440 cmpl $1,(%rdi)
24441 js 1b
24442 LOCK_PREFIX
24443 decl (%rdi)
24444 +
24445 +#ifdef CONFIG_PAX_REFCOUNT
24446 + jno 1234f
24447 + LOCK_PREFIX
24448 + incl (%rdi)
24449 + int $4
24450 +1234:
24451 + _ASM_EXTABLE(1234b, 1234b)
24452 +#endif
24453 +
24454 js __read_lock_failed
24455 + pax_force_retaddr
24456 ret
24457 CFI_ENDPROC
24458 END(__read_lock_failed)
24459 diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
24460 index 15acecf..f768b10 100644
24461 --- a/arch/x86/lib/rwsem_64.S
24462 +++ b/arch/x86/lib/rwsem_64.S
24463 @@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
24464 call rwsem_down_read_failed
24465 popq %rdx
24466 restore_common_regs
24467 + pax_force_retaddr
24468 ret
24469 ENDPROC(call_rwsem_down_read_failed)
24470
24471 @@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
24472 movq %rax,%rdi
24473 call rwsem_down_write_failed
24474 restore_common_regs
24475 + pax_force_retaddr
24476 ret
24477 ENDPROC(call_rwsem_down_write_failed)
24478
24479 @@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
24480 movq %rax,%rdi
24481 call rwsem_wake
24482 restore_common_regs
24483 -1: ret
24484 +1: pax_force_retaddr
24485 + ret
24486 ENDPROC(call_rwsem_wake)
24487
24488 /* Fix up special calling conventions */
24489 @@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
24490 call rwsem_downgrade_wake
24491 popq %rdx
24492 restore_common_regs
24493 + pax_force_retaddr
24494 ret
24495 ENDPROC(call_rwsem_downgrade_wake)
24496 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
24497 index bf9a7d5..fb06ab5 100644
24498 --- a/arch/x86/lib/thunk_64.S
24499 +++ b/arch/x86/lib/thunk_64.S
24500 @@ -10,7 +10,8 @@
24501 #include <asm/dwarf2.h>
24502 #include <asm/calling.h>
24503 #include <asm/rwlock.h>
24504 -
24505 + #include <asm/alternative-asm.h>
24506 +
24507 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
24508 .macro thunk name,func
24509 .globl \name
24510 @@ -70,6 +71,7 @@
24511 SAVE_ARGS
24512 restore:
24513 RESTORE_ARGS
24514 + pax_force_retaddr
24515 ret
24516 CFI_ENDPROC
24517
24518 @@ -77,5 +79,6 @@ restore:
24519 SAVE_ARGS
24520 restore_norax:
24521 RESTORE_ARGS 1
24522 + pax_force_retaddr
24523 ret
24524 CFI_ENDPROC
24525 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
24526 index 1f118d4..8e0ead9 100644
24527 --- a/arch/x86/lib/usercopy_32.c
24528 +++ b/arch/x86/lib/usercopy_32.c
24529 @@ -43,7 +43,7 @@ do { \
24530 __asm__ __volatile__( \
24531 " testl %1,%1\n" \
24532 " jz 2f\n" \
24533 - "0: lodsb\n" \
24534 + "0: "__copyuser_seg"lodsb\n" \
24535 " stosb\n" \
24536 " testb %%al,%%al\n" \
24537 " jz 1f\n" \
24538 @@ -83,7 +83,7 @@ do { \
24539 * and returns @count.
24540 */
24541 long
24542 -__strncpy_from_user(char *dst, const char __user *src, long count)
24543 +__strncpy_from_user(char *dst, const char __user *src, unsigned long count)
24544 {
24545 long res;
24546 __do_strncpy_from_user(dst, src, count, res);
24547 @@ -110,7 +110,7 @@ EXPORT_SYMBOL(__strncpy_from_user);
24548 * and returns @count.
24549 */
24550 long
24551 -strncpy_from_user(char *dst, const char __user *src, long count)
24552 +strncpy_from_user(char *dst, const char __user *src, unsigned long count)
24553 {
24554 long res = -EFAULT;
24555 if (access_ok(VERIFY_READ, src, 1))
24556 @@ -128,10 +128,12 @@ do { \
24557 int __d0; \
24558 might_fault(); \
24559 __asm__ __volatile__( \
24560 + __COPYUSER_SET_ES \
24561 "0: rep; stosl\n" \
24562 " movl %2,%0\n" \
24563 "1: rep; stosb\n" \
24564 "2:\n" \
24565 + __COPYUSER_RESTORE_ES \
24566 ".section .fixup,\"ax\"\n" \
24567 "3: lea 0(%2,%0,4),%0\n" \
24568 " jmp 2b\n" \
24569 @@ -192,7 +194,7 @@ EXPORT_SYMBOL(__clear_user);
24570 * On exception, returns 0.
24571 * If the string is too long, returns a value greater than @n.
24572 */
24573 -long strnlen_user(const char __user *s, long n)
24574 +long strnlen_user(const char __user *s, unsigned long n)
24575 {
24576 unsigned long mask = -__addr_ok(s);
24577 unsigned long res, tmp;
24578 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
24579 might_fault();
24580
24581 __asm__ __volatile__(
24582 + __COPYUSER_SET_ES
24583 " testl %0, %0\n"
24584 " jz 3f\n"
24585 " andl %0,%%ecx\n"
24586 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
24587 " subl %%ecx,%0\n"
24588 " addl %0,%%eax\n"
24589 "1:\n"
24590 + __COPYUSER_RESTORE_ES
24591 ".section .fixup,\"ax\"\n"
24592 "2: xorl %%eax,%%eax\n"
24593 " jmp 1b\n"
24594 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
24595
24596 #ifdef CONFIG_X86_INTEL_USERCOPY
24597 static unsigned long
24598 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
24599 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
24600 {
24601 int d0, d1;
24602 __asm__ __volatile__(
24603 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24604 " .align 2,0x90\n"
24605 "3: movl 0(%4), %%eax\n"
24606 "4: movl 4(%4), %%edx\n"
24607 - "5: movl %%eax, 0(%3)\n"
24608 - "6: movl %%edx, 4(%3)\n"
24609 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
24610 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
24611 "7: movl 8(%4), %%eax\n"
24612 "8: movl 12(%4),%%edx\n"
24613 - "9: movl %%eax, 8(%3)\n"
24614 - "10: movl %%edx, 12(%3)\n"
24615 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
24616 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
24617 "11: movl 16(%4), %%eax\n"
24618 "12: movl 20(%4), %%edx\n"
24619 - "13: movl %%eax, 16(%3)\n"
24620 - "14: movl %%edx, 20(%3)\n"
24621 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
24622 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
24623 "15: movl 24(%4), %%eax\n"
24624 "16: movl 28(%4), %%edx\n"
24625 - "17: movl %%eax, 24(%3)\n"
24626 - "18: movl %%edx, 28(%3)\n"
24627 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
24628 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
24629 "19: movl 32(%4), %%eax\n"
24630 "20: movl 36(%4), %%edx\n"
24631 - "21: movl %%eax, 32(%3)\n"
24632 - "22: movl %%edx, 36(%3)\n"
24633 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
24634 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
24635 "23: movl 40(%4), %%eax\n"
24636 "24: movl 44(%4), %%edx\n"
24637 - "25: movl %%eax, 40(%3)\n"
24638 - "26: movl %%edx, 44(%3)\n"
24639 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
24640 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
24641 "27: movl 48(%4), %%eax\n"
24642 "28: movl 52(%4), %%edx\n"
24643 - "29: movl %%eax, 48(%3)\n"
24644 - "30: movl %%edx, 52(%3)\n"
24645 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
24646 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
24647 "31: movl 56(%4), %%eax\n"
24648 "32: movl 60(%4), %%edx\n"
24649 - "33: movl %%eax, 56(%3)\n"
24650 - "34: movl %%edx, 60(%3)\n"
24651 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
24652 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
24653 " addl $-64, %0\n"
24654 " addl $64, %4\n"
24655 " addl $64, %3\n"
24656 @@ -278,10 +282,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24657 " shrl $2, %0\n"
24658 " andl $3, %%eax\n"
24659 " cld\n"
24660 + __COPYUSER_SET_ES
24661 "99: rep; movsl\n"
24662 "36: movl %%eax, %0\n"
24663 "37: rep; movsb\n"
24664 "100:\n"
24665 + __COPYUSER_RESTORE_ES
24666 ".section .fixup,\"ax\"\n"
24667 "101: lea 0(%%eax,%0,4),%0\n"
24668 " jmp 100b\n"
24669 @@ -334,46 +340,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24670 }
24671
24672 static unsigned long
24673 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
24674 +{
24675 + int d0, d1;
24676 + __asm__ __volatile__(
24677 + " .align 2,0x90\n"
24678 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
24679 + " cmpl $67, %0\n"
24680 + " jbe 3f\n"
24681 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
24682 + " .align 2,0x90\n"
24683 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
24684 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
24685 + "5: movl %%eax, 0(%3)\n"
24686 + "6: movl %%edx, 4(%3)\n"
24687 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
24688 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
24689 + "9: movl %%eax, 8(%3)\n"
24690 + "10: movl %%edx, 12(%3)\n"
24691 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
24692 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
24693 + "13: movl %%eax, 16(%3)\n"
24694 + "14: movl %%edx, 20(%3)\n"
24695 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
24696 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
24697 + "17: movl %%eax, 24(%3)\n"
24698 + "18: movl %%edx, 28(%3)\n"
24699 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
24700 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
24701 + "21: movl %%eax, 32(%3)\n"
24702 + "22: movl %%edx, 36(%3)\n"
24703 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
24704 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
24705 + "25: movl %%eax, 40(%3)\n"
24706 + "26: movl %%edx, 44(%3)\n"
24707 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
24708 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
24709 + "29: movl %%eax, 48(%3)\n"
24710 + "30: movl %%edx, 52(%3)\n"
24711 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
24712 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
24713 + "33: movl %%eax, 56(%3)\n"
24714 + "34: movl %%edx, 60(%3)\n"
24715 + " addl $-64, %0\n"
24716 + " addl $64, %4\n"
24717 + " addl $64, %3\n"
24718 + " cmpl $63, %0\n"
24719 + " ja 1b\n"
24720 + "35: movl %0, %%eax\n"
24721 + " shrl $2, %0\n"
24722 + " andl $3, %%eax\n"
24723 + " cld\n"
24724 + "99: rep; "__copyuser_seg" movsl\n"
24725 + "36: movl %%eax, %0\n"
24726 + "37: rep; "__copyuser_seg" movsb\n"
24727 + "100:\n"
24728 + ".section .fixup,\"ax\"\n"
24729 + "101: lea 0(%%eax,%0,4),%0\n"
24730 + " jmp 100b\n"
24731 + ".previous\n"
24732 + ".section __ex_table,\"a\"\n"
24733 + " .align 4\n"
24734 + " .long 1b,100b\n"
24735 + " .long 2b,100b\n"
24736 + " .long 3b,100b\n"
24737 + " .long 4b,100b\n"
24738 + " .long 5b,100b\n"
24739 + " .long 6b,100b\n"
24740 + " .long 7b,100b\n"
24741 + " .long 8b,100b\n"
24742 + " .long 9b,100b\n"
24743 + " .long 10b,100b\n"
24744 + " .long 11b,100b\n"
24745 + " .long 12b,100b\n"
24746 + " .long 13b,100b\n"
24747 + " .long 14b,100b\n"
24748 + " .long 15b,100b\n"
24749 + " .long 16b,100b\n"
24750 + " .long 17b,100b\n"
24751 + " .long 18b,100b\n"
24752 + " .long 19b,100b\n"
24753 + " .long 20b,100b\n"
24754 + " .long 21b,100b\n"
24755 + " .long 22b,100b\n"
24756 + " .long 23b,100b\n"
24757 + " .long 24b,100b\n"
24758 + " .long 25b,100b\n"
24759 + " .long 26b,100b\n"
24760 + " .long 27b,100b\n"
24761 + " .long 28b,100b\n"
24762 + " .long 29b,100b\n"
24763 + " .long 30b,100b\n"
24764 + " .long 31b,100b\n"
24765 + " .long 32b,100b\n"
24766 + " .long 33b,100b\n"
24767 + " .long 34b,100b\n"
24768 + " .long 35b,100b\n"
24769 + " .long 36b,100b\n"
24770 + " .long 37b,100b\n"
24771 + " .long 99b,101b\n"
24772 + ".previous"
24773 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
24774 + : "1"(to), "2"(from), "0"(size)
24775 + : "eax", "edx", "memory");
24776 + return size;
24777 +}
24778 +
24779 +static unsigned long
24780 +__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
24781 +static unsigned long
24782 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24783 {
24784 int d0, d1;
24785 __asm__ __volatile__(
24786 " .align 2,0x90\n"
24787 - "0: movl 32(%4), %%eax\n"
24788 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24789 " cmpl $67, %0\n"
24790 " jbe 2f\n"
24791 - "1: movl 64(%4), %%eax\n"
24792 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24793 " .align 2,0x90\n"
24794 - "2: movl 0(%4), %%eax\n"
24795 - "21: movl 4(%4), %%edx\n"
24796 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24797 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24798 " movl %%eax, 0(%3)\n"
24799 " movl %%edx, 4(%3)\n"
24800 - "3: movl 8(%4), %%eax\n"
24801 - "31: movl 12(%4),%%edx\n"
24802 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24803 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24804 " movl %%eax, 8(%3)\n"
24805 " movl %%edx, 12(%3)\n"
24806 - "4: movl 16(%4), %%eax\n"
24807 - "41: movl 20(%4), %%edx\n"
24808 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24809 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24810 " movl %%eax, 16(%3)\n"
24811 " movl %%edx, 20(%3)\n"
24812 - "10: movl 24(%4), %%eax\n"
24813 - "51: movl 28(%4), %%edx\n"
24814 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24815 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24816 " movl %%eax, 24(%3)\n"
24817 " movl %%edx, 28(%3)\n"
24818 - "11: movl 32(%4), %%eax\n"
24819 - "61: movl 36(%4), %%edx\n"
24820 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24821 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24822 " movl %%eax, 32(%3)\n"
24823 " movl %%edx, 36(%3)\n"
24824 - "12: movl 40(%4), %%eax\n"
24825 - "71: movl 44(%4), %%edx\n"
24826 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24827 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24828 " movl %%eax, 40(%3)\n"
24829 " movl %%edx, 44(%3)\n"
24830 - "13: movl 48(%4), %%eax\n"
24831 - "81: movl 52(%4), %%edx\n"
24832 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24833 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24834 " movl %%eax, 48(%3)\n"
24835 " movl %%edx, 52(%3)\n"
24836 - "14: movl 56(%4), %%eax\n"
24837 - "91: movl 60(%4), %%edx\n"
24838 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24839 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24840 " movl %%eax, 56(%3)\n"
24841 " movl %%edx, 60(%3)\n"
24842 " addl $-64, %0\n"
24843 @@ -385,9 +500,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24844 " shrl $2, %0\n"
24845 " andl $3, %%eax\n"
24846 " cld\n"
24847 - "6: rep; movsl\n"
24848 + "6: rep; "__copyuser_seg" movsl\n"
24849 " movl %%eax,%0\n"
24850 - "7: rep; movsb\n"
24851 + "7: rep; "__copyuser_seg" movsb\n"
24852 "8:\n"
24853 ".section .fixup,\"ax\"\n"
24854 "9: lea 0(%%eax,%0,4),%0\n"
24855 @@ -434,47 +549,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24856 */
24857
24858 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24859 + const void __user *from, unsigned long size) __size_overflow(3);
24860 +static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24861 const void __user *from, unsigned long size)
24862 {
24863 int d0, d1;
24864
24865 __asm__ __volatile__(
24866 " .align 2,0x90\n"
24867 - "0: movl 32(%4), %%eax\n"
24868 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24869 " cmpl $67, %0\n"
24870 " jbe 2f\n"
24871 - "1: movl 64(%4), %%eax\n"
24872 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24873 " .align 2,0x90\n"
24874 - "2: movl 0(%4), %%eax\n"
24875 - "21: movl 4(%4), %%edx\n"
24876 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24877 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24878 " movnti %%eax, 0(%3)\n"
24879 " movnti %%edx, 4(%3)\n"
24880 - "3: movl 8(%4), %%eax\n"
24881 - "31: movl 12(%4),%%edx\n"
24882 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24883 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24884 " movnti %%eax, 8(%3)\n"
24885 " movnti %%edx, 12(%3)\n"
24886 - "4: movl 16(%4), %%eax\n"
24887 - "41: movl 20(%4), %%edx\n"
24888 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24889 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24890 " movnti %%eax, 16(%3)\n"
24891 " movnti %%edx, 20(%3)\n"
24892 - "10: movl 24(%4), %%eax\n"
24893 - "51: movl 28(%4), %%edx\n"
24894 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24895 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24896 " movnti %%eax, 24(%3)\n"
24897 " movnti %%edx, 28(%3)\n"
24898 - "11: movl 32(%4), %%eax\n"
24899 - "61: movl 36(%4), %%edx\n"
24900 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24901 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24902 " movnti %%eax, 32(%3)\n"
24903 " movnti %%edx, 36(%3)\n"
24904 - "12: movl 40(%4), %%eax\n"
24905 - "71: movl 44(%4), %%edx\n"
24906 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24907 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24908 " movnti %%eax, 40(%3)\n"
24909 " movnti %%edx, 44(%3)\n"
24910 - "13: movl 48(%4), %%eax\n"
24911 - "81: movl 52(%4), %%edx\n"
24912 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24913 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24914 " movnti %%eax, 48(%3)\n"
24915 " movnti %%edx, 52(%3)\n"
24916 - "14: movl 56(%4), %%eax\n"
24917 - "91: movl 60(%4), %%edx\n"
24918 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24919 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24920 " movnti %%eax, 56(%3)\n"
24921 " movnti %%edx, 60(%3)\n"
24922 " addl $-64, %0\n"
24923 @@ -487,9 +604,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24924 " shrl $2, %0\n"
24925 " andl $3, %%eax\n"
24926 " cld\n"
24927 - "6: rep; movsl\n"
24928 + "6: rep; "__copyuser_seg" movsl\n"
24929 " movl %%eax,%0\n"
24930 - "7: rep; movsb\n"
24931 + "7: rep; "__copyuser_seg" movsb\n"
24932 "8:\n"
24933 ".section .fixup,\"ax\"\n"
24934 "9: lea 0(%%eax,%0,4),%0\n"
24935 @@ -531,47 +648,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24936 }
24937
24938 static unsigned long __copy_user_intel_nocache(void *to,
24939 + const void __user *from, unsigned long size) __size_overflow(3);
24940 +static unsigned long __copy_user_intel_nocache(void *to,
24941 const void __user *from, unsigned long size)
24942 {
24943 int d0, d1;
24944
24945 __asm__ __volatile__(
24946 " .align 2,0x90\n"
24947 - "0: movl 32(%4), %%eax\n"
24948 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24949 " cmpl $67, %0\n"
24950 " jbe 2f\n"
24951 - "1: movl 64(%4), %%eax\n"
24952 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24953 " .align 2,0x90\n"
24954 - "2: movl 0(%4), %%eax\n"
24955 - "21: movl 4(%4), %%edx\n"
24956 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24957 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24958 " movnti %%eax, 0(%3)\n"
24959 " movnti %%edx, 4(%3)\n"
24960 - "3: movl 8(%4), %%eax\n"
24961 - "31: movl 12(%4),%%edx\n"
24962 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24963 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24964 " movnti %%eax, 8(%3)\n"
24965 " movnti %%edx, 12(%3)\n"
24966 - "4: movl 16(%4), %%eax\n"
24967 - "41: movl 20(%4), %%edx\n"
24968 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24969 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24970 " movnti %%eax, 16(%3)\n"
24971 " movnti %%edx, 20(%3)\n"
24972 - "10: movl 24(%4), %%eax\n"
24973 - "51: movl 28(%4), %%edx\n"
24974 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24975 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24976 " movnti %%eax, 24(%3)\n"
24977 " movnti %%edx, 28(%3)\n"
24978 - "11: movl 32(%4), %%eax\n"
24979 - "61: movl 36(%4), %%edx\n"
24980 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24981 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24982 " movnti %%eax, 32(%3)\n"
24983 " movnti %%edx, 36(%3)\n"
24984 - "12: movl 40(%4), %%eax\n"
24985 - "71: movl 44(%4), %%edx\n"
24986 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24987 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24988 " movnti %%eax, 40(%3)\n"
24989 " movnti %%edx, 44(%3)\n"
24990 - "13: movl 48(%4), %%eax\n"
24991 - "81: movl 52(%4), %%edx\n"
24992 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24993 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24994 " movnti %%eax, 48(%3)\n"
24995 " movnti %%edx, 52(%3)\n"
24996 - "14: movl 56(%4), %%eax\n"
24997 - "91: movl 60(%4), %%edx\n"
24998 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24999 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
25000 " movnti %%eax, 56(%3)\n"
25001 " movnti %%edx, 60(%3)\n"
25002 " addl $-64, %0\n"
25003 @@ -584,9 +703,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
25004 " shrl $2, %0\n"
25005 " andl $3, %%eax\n"
25006 " cld\n"
25007 - "6: rep; movsl\n"
25008 + "6: rep; "__copyuser_seg" movsl\n"
25009 " movl %%eax,%0\n"
25010 - "7: rep; movsb\n"
25011 + "7: rep; "__copyuser_seg" movsb\n"
25012 "8:\n"
25013 ".section .fixup,\"ax\"\n"
25014 "9: lea 0(%%eax,%0,4),%0\n"
25015 @@ -629,32 +748,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
25016 */
25017 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
25018 unsigned long size);
25019 -unsigned long __copy_user_intel(void __user *to, const void *from,
25020 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
25021 + unsigned long size);
25022 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
25023 unsigned long size);
25024 unsigned long __copy_user_zeroing_intel_nocache(void *to,
25025 const void __user *from, unsigned long size);
25026 #endif /* CONFIG_X86_INTEL_USERCOPY */
25027
25028 /* Generic arbitrary sized copy. */
25029 -#define __copy_user(to, from, size) \
25030 +#define __copy_user(to, from, size, prefix, set, restore) \
25031 do { \
25032 int __d0, __d1, __d2; \
25033 __asm__ __volatile__( \
25034 + set \
25035 " cmp $7,%0\n" \
25036 " jbe 1f\n" \
25037 " movl %1,%0\n" \
25038 " negl %0\n" \
25039 " andl $7,%0\n" \
25040 " subl %0,%3\n" \
25041 - "4: rep; movsb\n" \
25042 + "4: rep; "prefix"movsb\n" \
25043 " movl %3,%0\n" \
25044 " shrl $2,%0\n" \
25045 " andl $3,%3\n" \
25046 " .align 2,0x90\n" \
25047 - "0: rep; movsl\n" \
25048 + "0: rep; "prefix"movsl\n" \
25049 " movl %3,%0\n" \
25050 - "1: rep; movsb\n" \
25051 + "1: rep; "prefix"movsb\n" \
25052 "2:\n" \
25053 + restore \
25054 ".section .fixup,\"ax\"\n" \
25055 "5: addl %3,%0\n" \
25056 " jmp 2b\n" \
25057 @@ -682,14 +805,14 @@ do { \
25058 " negl %0\n" \
25059 " andl $7,%0\n" \
25060 " subl %0,%3\n" \
25061 - "4: rep; movsb\n" \
25062 + "4: rep; "__copyuser_seg"movsb\n" \
25063 " movl %3,%0\n" \
25064 " shrl $2,%0\n" \
25065 " andl $3,%3\n" \
25066 " .align 2,0x90\n" \
25067 - "0: rep; movsl\n" \
25068 + "0: rep; "__copyuser_seg"movsl\n" \
25069 " movl %3,%0\n" \
25070 - "1: rep; movsb\n" \
25071 + "1: rep; "__copyuser_seg"movsb\n" \
25072 "2:\n" \
25073 ".section .fixup,\"ax\"\n" \
25074 "5: addl %3,%0\n" \
25075 @@ -775,9 +898,9 @@ survive:
25076 }
25077 #endif
25078 if (movsl_is_ok(to, from, n))
25079 - __copy_user(to, from, n);
25080 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
25081 else
25082 - n = __copy_user_intel(to, from, n);
25083 + n = __generic_copy_to_user_intel(to, from, n);
25084 return n;
25085 }
25086 EXPORT_SYMBOL(__copy_to_user_ll);
25087 @@ -797,10 +920,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
25088 unsigned long n)
25089 {
25090 if (movsl_is_ok(to, from, n))
25091 - __copy_user(to, from, n);
25092 + __copy_user(to, from, n, __copyuser_seg, "", "");
25093 else
25094 - n = __copy_user_intel((void __user *)to,
25095 - (const void *)from, n);
25096 + n = __generic_copy_from_user_intel(to, from, n);
25097 return n;
25098 }
25099 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
25100 @@ -827,59 +949,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
25101 if (n > 64 && cpu_has_xmm2)
25102 n = __copy_user_intel_nocache(to, from, n);
25103 else
25104 - __copy_user(to, from, n);
25105 + __copy_user(to, from, n, __copyuser_seg, "", "");
25106 #else
25107 - __copy_user(to, from, n);
25108 + __copy_user(to, from, n, __copyuser_seg, "", "");
25109 #endif
25110 return n;
25111 }
25112 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
25113
25114 -/**
25115 - * copy_to_user: - Copy a block of data into user space.
25116 - * @to: Destination address, in user space.
25117 - * @from: Source address, in kernel space.
25118 - * @n: Number of bytes to copy.
25119 - *
25120 - * Context: User context only. This function may sleep.
25121 - *
25122 - * Copy data from kernel space to user space.
25123 - *
25124 - * Returns number of bytes that could not be copied.
25125 - * On success, this will be zero.
25126 - */
25127 -unsigned long
25128 -copy_to_user(void __user *to, const void *from, unsigned long n)
25129 +#ifdef CONFIG_PAX_MEMORY_UDEREF
25130 +void __set_fs(mm_segment_t x)
25131 {
25132 - if (access_ok(VERIFY_WRITE, to, n))
25133 - n = __copy_to_user(to, from, n);
25134 - return n;
25135 + switch (x.seg) {
25136 + case 0:
25137 + loadsegment(gs, 0);
25138 + break;
25139 + case TASK_SIZE_MAX:
25140 + loadsegment(gs, __USER_DS);
25141 + break;
25142 + case -1UL:
25143 + loadsegment(gs, __KERNEL_DS);
25144 + break;
25145 + default:
25146 + BUG();
25147 + }
25148 + return;
25149 }
25150 -EXPORT_SYMBOL(copy_to_user);
25151 +EXPORT_SYMBOL(__set_fs);
25152
25153 -/**
25154 - * copy_from_user: - Copy a block of data from user space.
25155 - * @to: Destination address, in kernel space.
25156 - * @from: Source address, in user space.
25157 - * @n: Number of bytes to copy.
25158 - *
25159 - * Context: User context only. This function may sleep.
25160 - *
25161 - * Copy data from user space to kernel space.
25162 - *
25163 - * Returns number of bytes that could not be copied.
25164 - * On success, this will be zero.
25165 - *
25166 - * If some data could not be copied, this function will pad the copied
25167 - * data to the requested size using zero bytes.
25168 - */
25169 -unsigned long
25170 -copy_from_user(void *to, const void __user *from, unsigned long n)
25171 +void set_fs(mm_segment_t x)
25172 {
25173 - if (access_ok(VERIFY_READ, from, n))
25174 - n = __copy_from_user(to, from, n);
25175 - else
25176 - memset(to, 0, n);
25177 - return n;
25178 + current_thread_info()->addr_limit = x;
25179 + __set_fs(x);
25180 }
25181 -EXPORT_SYMBOL(copy_from_user);
25182 +EXPORT_SYMBOL(set_fs);
25183 +#endif
25184 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
25185 index b7c2849..bab76d3 100644
25186 --- a/arch/x86/lib/usercopy_64.c
25187 +++ b/arch/x86/lib/usercopy_64.c
25188 @@ -39,16 +39,22 @@ do { \
25189 } while (0)
25190
25191 long
25192 -__strncpy_from_user(char *dst, const char __user *src, long count)
25193 +__strncpy_from_user(char *dst, const char __user *src, unsigned long count)
25194 {
25195 long res;
25196 +
25197 +#ifdef CONFIG_PAX_MEMORY_UDEREF
25198 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
25199 + src += PAX_USER_SHADOW_BASE;
25200 +#endif
25201 +
25202 __do_strncpy_from_user(dst, src, count, res);
25203 return res;
25204 }
25205 EXPORT_SYMBOL(__strncpy_from_user);
25206
25207 long
25208 -strncpy_from_user(char *dst, const char __user *src, long count)
25209 +strncpy_from_user(char *dst, const char __user *src, unsigned long count)
25210 {
25211 long res = -EFAULT;
25212 if (access_ok(VERIFY_READ, src, 1))
25213 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
25214 {
25215 long __d0;
25216 might_fault();
25217 +
25218 +#ifdef CONFIG_PAX_MEMORY_UDEREF
25219 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
25220 + addr += PAX_USER_SHADOW_BASE;
25221 +#endif
25222 +
25223 /* no memory constraint because it doesn't change any memory gcc knows
25224 about */
25225 asm volatile(
25226 @@ -107,7 +119,7 @@ EXPORT_SYMBOL(clear_user);
25227 * Return 0 on exception, a value greater than N if too long
25228 */
25229
25230 -long __strnlen_user(const char __user *s, long n)
25231 +long __strnlen_user(const char __user *s, unsigned long n)
25232 {
25233 long res = 0;
25234 char c;
25235 @@ -125,7 +137,7 @@ long __strnlen_user(const char __user *s, long n)
25236 }
25237 EXPORT_SYMBOL(__strnlen_user);
25238
25239 -long strnlen_user(const char __user *s, long n)
25240 +long strnlen_user(const char __user *s, unsigned long n)
25241 {
25242 if (!access_ok(VERIFY_READ, s, 1))
25243 return 0;
25244 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
25245 }
25246 EXPORT_SYMBOL(strlen_user);
25247
25248 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
25249 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
25250 {
25251 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
25252 - return copy_user_generic((__force void *)to, (__force void *)from, len);
25253 - }
25254 - return len;
25255 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
25256 +
25257 +#ifdef CONFIG_PAX_MEMORY_UDEREF
25258 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
25259 + to += PAX_USER_SHADOW_BASE;
25260 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
25261 + from += PAX_USER_SHADOW_BASE;
25262 +#endif
25263 +
25264 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
25265 + }
25266 + return len;
25267 }
25268 EXPORT_SYMBOL(copy_in_user);
25269
25270 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
25271 * it is not necessary to optimize tail handling.
25272 */
25273 unsigned long
25274 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
25275 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
25276 {
25277 char c;
25278 unsigned zero_len;
25279 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
25280 index 61b41ca..5fef66a 100644
25281 --- a/arch/x86/mm/extable.c
25282 +++ b/arch/x86/mm/extable.c
25283 @@ -1,14 +1,71 @@
25284 #include <linux/module.h>
25285 #include <linux/spinlock.h>
25286 +#include <linux/sort.h>
25287 #include <asm/uaccess.h>
25288 +#include <asm/pgtable.h>
25289
25290 +/*
25291 + * The exception table needs to be sorted so that the binary
25292 + * search that we use to find entries in it works properly.
25293 + * This is used both for the kernel exception table and for
25294 + * the exception tables of modules that get loaded.
25295 + */
25296 +static int cmp_ex(const void *a, const void *b)
25297 +{
25298 + const struct exception_table_entry *x = a, *y = b;
25299 +
25300 + /* avoid overflow */
25301 + if (x->insn > y->insn)
25302 + return 1;
25303 + if (x->insn < y->insn)
25304 + return -1;
25305 + return 0;
25306 +}
25307 +
25308 +static void swap_ex(void *a, void *b, int size)
25309 +{
25310 + struct exception_table_entry t, *x = a, *y = b;
25311 +
25312 + t = *x;
25313 +
25314 + pax_open_kernel();
25315 + *x = *y;
25316 + *y = t;
25317 + pax_close_kernel();
25318 +}
25319 +
25320 +void sort_extable(struct exception_table_entry *start,
25321 + struct exception_table_entry *finish)
25322 +{
25323 + sort(start, finish - start, sizeof(struct exception_table_entry),
25324 + cmp_ex, swap_ex);
25325 +}
25326 +
25327 +#ifdef CONFIG_MODULES
25328 +/*
25329 + * If the exception table is sorted, any referring to the module init
25330 + * will be at the beginning or the end.
25331 + */
25332 +void trim_init_extable(struct module *m)
25333 +{
25334 + /*trim the beginning*/
25335 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
25336 + m->extable++;
25337 + m->num_exentries--;
25338 + }
25339 + /*trim the end*/
25340 + while (m->num_exentries &&
25341 + within_module_init(m->extable[m->num_exentries-1].insn, m))
25342 + m->num_exentries--;
25343 +}
25344 +#endif /* CONFIG_MODULES */
25345
25346 int fixup_exception(struct pt_regs *regs)
25347 {
25348 const struct exception_table_entry *fixup;
25349
25350 #ifdef CONFIG_PNPBIOS
25351 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
25352 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
25353 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
25354 extern u32 pnp_bios_is_utter_crap;
25355 pnp_bios_is_utter_crap = 1;
25356 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
25357 index 8ac0d76..ca501e2 100644
25358 --- a/arch/x86/mm/fault.c
25359 +++ b/arch/x86/mm/fault.c
25360 @@ -11,10 +11,19 @@
25361 #include <linux/kprobes.h> /* __kprobes, ... */
25362 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
25363 #include <linux/perf_event.h> /* perf_sw_event */
25364 +#include <linux/unistd.h>
25365 +#include <linux/compiler.h>
25366
25367 #include <asm/traps.h> /* dotraplinkage, ... */
25368 #include <asm/pgalloc.h> /* pgd_*(), ... */
25369 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
25370 +#include <asm/vsyscall.h>
25371 +#include <asm/tlbflush.h>
25372 +
25373 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25374 +#include <asm/stacktrace.h>
25375 +#include "../kernel/dumpstack.h"
25376 +#endif
25377
25378 /*
25379 * Page fault error code bits:
25380 @@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
25381 int ret = 0;
25382
25383 /* kprobe_running() needs smp_processor_id() */
25384 - if (kprobes_built_in() && !user_mode_vm(regs)) {
25385 + if (kprobes_built_in() && !user_mode(regs)) {
25386 preempt_disable();
25387 if (kprobe_running() && kprobe_fault_handler(regs, 14))
25388 ret = 1;
25389 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
25390 return !instr_lo || (instr_lo>>1) == 1;
25391 case 0x00:
25392 /* Prefetch instruction is 0x0F0D or 0x0F18 */
25393 - if (probe_kernel_address(instr, opcode))
25394 + if (user_mode(regs)) {
25395 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
25396 + return 0;
25397 + } else if (probe_kernel_address(instr, opcode))
25398 return 0;
25399
25400 *prefetch = (instr_lo == 0xF) &&
25401 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
25402 while (instr < max_instr) {
25403 unsigned char opcode;
25404
25405 - if (probe_kernel_address(instr, opcode))
25406 + if (user_mode(regs)) {
25407 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
25408 + break;
25409 + } else if (probe_kernel_address(instr, opcode))
25410 break;
25411
25412 instr++;
25413 @@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
25414 force_sig_info(si_signo, &info, tsk);
25415 }
25416
25417 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25418 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
25419 +#endif
25420 +
25421 +#ifdef CONFIG_PAX_EMUTRAMP
25422 +static int pax_handle_fetch_fault(struct pt_regs *regs);
25423 +#endif
25424 +
25425 +#ifdef CONFIG_PAX_PAGEEXEC
25426 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
25427 +{
25428 + pgd_t *pgd;
25429 + pud_t *pud;
25430 + pmd_t *pmd;
25431 +
25432 + pgd = pgd_offset(mm, address);
25433 + if (!pgd_present(*pgd))
25434 + return NULL;
25435 + pud = pud_offset(pgd, address);
25436 + if (!pud_present(*pud))
25437 + return NULL;
25438 + pmd = pmd_offset(pud, address);
25439 + if (!pmd_present(*pmd))
25440 + return NULL;
25441 + return pmd;
25442 +}
25443 +#endif
25444 +
25445 DEFINE_SPINLOCK(pgd_lock);
25446 LIST_HEAD(pgd_list);
25447
25448 @@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
25449 address += PMD_SIZE) {
25450
25451 unsigned long flags;
25452 +
25453 +#ifdef CONFIG_PAX_PER_CPU_PGD
25454 + unsigned long cpu;
25455 +#else
25456 struct page *page;
25457 +#endif
25458
25459 spin_lock_irqsave(&pgd_lock, flags);
25460 +
25461 +#ifdef CONFIG_PAX_PER_CPU_PGD
25462 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25463 + pgd_t *pgd = get_cpu_pgd(cpu);
25464 +#else
25465 list_for_each_entry(page, &pgd_list, lru) {
25466 - if (!vmalloc_sync_one(page_address(page), address))
25467 + pgd_t *pgd = page_address(page);
25468 +#endif
25469 +
25470 + if (!vmalloc_sync_one(pgd, address))
25471 break;
25472 }
25473 spin_unlock_irqrestore(&pgd_lock, flags);
25474 @@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
25475 * an interrupt in the middle of a task switch..
25476 */
25477 pgd_paddr = read_cr3();
25478 +
25479 +#ifdef CONFIG_PAX_PER_CPU_PGD
25480 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
25481 +#endif
25482 +
25483 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
25484 if (!pmd_k)
25485 return -1;
25486 @@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
25487
25488 const pgd_t *pgd_ref = pgd_offset_k(address);
25489 unsigned long flags;
25490 +
25491 +#ifdef CONFIG_PAX_PER_CPU_PGD
25492 + unsigned long cpu;
25493 +#else
25494 struct page *page;
25495 +#endif
25496
25497 if (pgd_none(*pgd_ref))
25498 continue;
25499
25500 spin_lock_irqsave(&pgd_lock, flags);
25501 +
25502 +#ifdef CONFIG_PAX_PER_CPU_PGD
25503 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25504 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
25505 +#else
25506 list_for_each_entry(page, &pgd_list, lru) {
25507 pgd_t *pgd;
25508 pgd = (pgd_t *)page_address(page) + pgd_index(address);
25509 +#endif
25510 +
25511 if (pgd_none(*pgd))
25512 set_pgd(pgd, *pgd_ref);
25513 else
25514 @@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
25515 * happen within a race in page table update. In the later
25516 * case just flush:
25517 */
25518 +
25519 +#ifdef CONFIG_PAX_PER_CPU_PGD
25520 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
25521 + pgd = pgd_offset_cpu(smp_processor_id(), address);
25522 +#else
25523 pgd = pgd_offset(current->active_mm, address);
25524 +#endif
25525 +
25526 pgd_ref = pgd_offset_k(address);
25527 if (pgd_none(*pgd_ref))
25528 return -1;
25529 @@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
25530 static int is_errata100(struct pt_regs *regs, unsigned long address)
25531 {
25532 #ifdef CONFIG_X86_64
25533 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
25534 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
25535 return 1;
25536 #endif
25537 return 0;
25538 @@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
25539 }
25540
25541 static const char nx_warning[] = KERN_CRIT
25542 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
25543 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
25544
25545 static void
25546 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
25547 @@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
25548 if (!oops_may_print())
25549 return;
25550
25551 - if (error_code & PF_INSTR) {
25552 + if (nx_enabled && (error_code & PF_INSTR)) {
25553 unsigned int level;
25554
25555 pte_t *pte = lookup_address(address, &level);
25556
25557 if (pte && pte_present(*pte) && !pte_exec(*pte))
25558 - printk(nx_warning, current_uid());
25559 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
25560 }
25561
25562 +#ifdef CONFIG_PAX_KERNEXEC
25563 + if (init_mm.start_code <= address && address < init_mm.end_code) {
25564 + if (current->signal->curr_ip)
25565 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
25566 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
25567 + else
25568 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
25569 + current->comm, task_pid_nr(current), current_uid(), current_euid());
25570 + }
25571 +#endif
25572 +
25573 printk(KERN_ALERT "BUG: unable to handle kernel ");
25574 if (address < PAGE_SIZE)
25575 printk(KERN_CONT "NULL pointer dereference");
25576 @@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
25577 {
25578 struct task_struct *tsk = current;
25579
25580 +#ifdef CONFIG_X86_64
25581 + struct mm_struct *mm = tsk->mm;
25582 +
25583 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
25584 + if (regs->ip == (unsigned long)vgettimeofday) {
25585 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
25586 + return;
25587 + } else if (regs->ip == (unsigned long)vtime) {
25588 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
25589 + return;
25590 + } else if (regs->ip == (unsigned long)vgetcpu) {
25591 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
25592 + return;
25593 + }
25594 + }
25595 +#endif
25596 +
25597 /* User mode accesses just cause a SIGSEGV */
25598 if (error_code & PF_USER) {
25599 /*
25600 @@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
25601 if (is_errata100(regs, address))
25602 return;
25603
25604 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25605 + if (pax_is_fetch_fault(regs, error_code, address)) {
25606 +
25607 +#ifdef CONFIG_PAX_EMUTRAMP
25608 + switch (pax_handle_fetch_fault(regs)) {
25609 + case 2:
25610 + return;
25611 + }
25612 +#endif
25613 +
25614 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
25615 + do_group_exit(SIGKILL);
25616 + }
25617 +#endif
25618 +
25619 if (unlikely(show_unhandled_signals))
25620 show_signal_msg(regs, error_code, address, tsk);
25621
25622 @@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
25623 if (fault & VM_FAULT_HWPOISON) {
25624 printk(KERN_ERR
25625 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
25626 - tsk->comm, tsk->pid, address);
25627 + tsk->comm, task_pid_nr(tsk), address);
25628 code = BUS_MCEERR_AR;
25629 }
25630 #endif
25631 @@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
25632 return 1;
25633 }
25634
25635 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25636 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
25637 +{
25638 + pte_t *pte;
25639 + pmd_t *pmd;
25640 + spinlock_t *ptl;
25641 + unsigned char pte_mask;
25642 +
25643 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
25644 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
25645 + return 0;
25646 +
25647 + /* PaX: it's our fault, let's handle it if we can */
25648 +
25649 + /* PaX: take a look at read faults before acquiring any locks */
25650 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
25651 + /* instruction fetch attempt from a protected page in user mode */
25652 + up_read(&mm->mmap_sem);
25653 +
25654 +#ifdef CONFIG_PAX_EMUTRAMP
25655 + switch (pax_handle_fetch_fault(regs)) {
25656 + case 2:
25657 + return 1;
25658 + }
25659 +#endif
25660 +
25661 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
25662 + do_group_exit(SIGKILL);
25663 + }
25664 +
25665 + pmd = pax_get_pmd(mm, address);
25666 + if (unlikely(!pmd))
25667 + return 0;
25668 +
25669 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
25670 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
25671 + pte_unmap_unlock(pte, ptl);
25672 + return 0;
25673 + }
25674 +
25675 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
25676 + /* write attempt to a protected page in user mode */
25677 + pte_unmap_unlock(pte, ptl);
25678 + return 0;
25679 + }
25680 +
25681 +#ifdef CONFIG_SMP
25682 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
25683 +#else
25684 + if (likely(address > get_limit(regs->cs)))
25685 +#endif
25686 + {
25687 + set_pte(pte, pte_mkread(*pte));
25688 + __flush_tlb_one(address);
25689 + pte_unmap_unlock(pte, ptl);
25690 + up_read(&mm->mmap_sem);
25691 + return 1;
25692 + }
25693 +
25694 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
25695 +
25696 + /*
25697 + * PaX: fill DTLB with user rights and retry
25698 + */
25699 + __asm__ __volatile__ (
25700 + "orb %2,(%1)\n"
25701 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
25702 +/*
25703 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
25704 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
25705 + * page fault when examined during a TLB load attempt. this is true not only
25706 + * for PTEs holding a non-present entry but also present entries that will
25707 + * raise a page fault (such as those set up by PaX, or the copy-on-write
25708 + * mechanism). in effect it means that we do *not* need to flush the TLBs
25709 + * for our target pages since their PTEs are simply not in the TLBs at all.
25710 +
25711 + * the best thing in omitting it is that we gain around 15-20% speed in the
25712 + * fast path of the page fault handler and can get rid of tracing since we
25713 + * can no longer flush unintended entries.
25714 + */
25715 + "invlpg (%0)\n"
25716 +#endif
25717 + __copyuser_seg"testb $0,(%0)\n"
25718 + "xorb %3,(%1)\n"
25719 + :
25720 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
25721 + : "memory", "cc");
25722 + pte_unmap_unlock(pte, ptl);
25723 + up_read(&mm->mmap_sem);
25724 + return 1;
25725 +}
25726 +#endif
25727 +
25728 /*
25729 * Handle a spurious fault caused by a stale TLB entry.
25730 *
25731 @@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
25732 static inline int
25733 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
25734 {
25735 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
25736 + return 1;
25737 +
25738 if (write) {
25739 /* write, present and write, not present: */
25740 if (unlikely(!(vma->vm_flags & VM_WRITE)))
25741 @@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25742 {
25743 struct vm_area_struct *vma;
25744 struct task_struct *tsk;
25745 - unsigned long address;
25746 struct mm_struct *mm;
25747 int write;
25748 int fault;
25749
25750 - tsk = current;
25751 - mm = tsk->mm;
25752 -
25753 /* Get the faulting address: */
25754 - address = read_cr2();
25755 + unsigned long address = read_cr2();
25756 +
25757 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25758 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
25759 + if (!search_exception_tables(regs->ip)) {
25760 + bad_area_nosemaphore(regs, error_code, address);
25761 + return;
25762 + }
25763 + if (address < PAX_USER_SHADOW_BASE) {
25764 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
25765 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
25766 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
25767 + } else
25768 + address -= PAX_USER_SHADOW_BASE;
25769 + }
25770 +#endif
25771 +
25772 + tsk = current;
25773 + mm = tsk->mm;
25774
25775 /*
25776 * Detect and handle instructions that would cause a page fault for
25777 @@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25778 * User-mode registers count as a user access even for any
25779 * potential system fault or CPU buglet:
25780 */
25781 - if (user_mode_vm(regs)) {
25782 + if (user_mode(regs)) {
25783 local_irq_enable();
25784 error_code |= PF_USER;
25785 } else {
25786 @@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25787 might_sleep();
25788 }
25789
25790 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25791 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
25792 + return;
25793 +#endif
25794 +
25795 vma = find_vma(mm, address);
25796 if (unlikely(!vma)) {
25797 bad_area(regs, error_code, address);
25798 @@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25799 bad_area(regs, error_code, address);
25800 return;
25801 }
25802 - if (error_code & PF_USER) {
25803 - /*
25804 - * Accessing the stack below %sp is always a bug.
25805 - * The large cushion allows instructions like enter
25806 - * and pusha to work. ("enter $65535, $31" pushes
25807 - * 32 pointers and then decrements %sp by 65535.)
25808 - */
25809 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
25810 - bad_area(regs, error_code, address);
25811 - return;
25812 - }
25813 + /*
25814 + * Accessing the stack below %sp is always a bug.
25815 + * The large cushion allows instructions like enter
25816 + * and pusha to work. ("enter $65535, $31" pushes
25817 + * 32 pointers and then decrements %sp by 65535.)
25818 + */
25819 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
25820 + bad_area(regs, error_code, address);
25821 + return;
25822 }
25823 +
25824 +#ifdef CONFIG_PAX_SEGMEXEC
25825 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
25826 + bad_area(regs, error_code, address);
25827 + return;
25828 + }
25829 +#endif
25830 +
25831 if (unlikely(expand_stack(vma, address))) {
25832 bad_area(regs, error_code, address);
25833 return;
25834 @@ -1146,3 +1390,292 @@ good_area:
25835
25836 up_read(&mm->mmap_sem);
25837 }
25838 +
25839 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25840 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
25841 +{
25842 + struct mm_struct *mm = current->mm;
25843 + unsigned long ip = regs->ip;
25844 +
25845 + if (v8086_mode(regs))
25846 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
25847 +
25848 +#ifdef CONFIG_PAX_PAGEEXEC
25849 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
25850 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
25851 + return true;
25852 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
25853 + return true;
25854 + return false;
25855 + }
25856 +#endif
25857 +
25858 +#ifdef CONFIG_PAX_SEGMEXEC
25859 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
25860 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
25861 + return true;
25862 + return false;
25863 + }
25864 +#endif
25865 +
25866 + return false;
25867 +}
25868 +#endif
25869 +
25870 +#ifdef CONFIG_PAX_EMUTRAMP
25871 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
25872 +{
25873 + int err;
25874 +
25875 + do { /* PaX: libffi trampoline emulation */
25876 + unsigned char mov, jmp;
25877 + unsigned int addr1, addr2;
25878 +
25879 +#ifdef CONFIG_X86_64
25880 + if ((regs->ip + 9) >> 32)
25881 + break;
25882 +#endif
25883 +
25884 + err = get_user(mov, (unsigned char __user *)regs->ip);
25885 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25886 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25887 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25888 +
25889 + if (err)
25890 + break;
25891 +
25892 + if (mov == 0xB8 && jmp == 0xE9) {
25893 + regs->ax = addr1;
25894 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25895 + return 2;
25896 + }
25897 + } while (0);
25898 +
25899 + do { /* PaX: gcc trampoline emulation #1 */
25900 + unsigned char mov1, mov2;
25901 + unsigned short jmp;
25902 + unsigned int addr1, addr2;
25903 +
25904 +#ifdef CONFIG_X86_64
25905 + if ((regs->ip + 11) >> 32)
25906 + break;
25907 +#endif
25908 +
25909 + err = get_user(mov1, (unsigned char __user *)regs->ip);
25910 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25911 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
25912 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25913 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
25914 +
25915 + if (err)
25916 + break;
25917 +
25918 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
25919 + regs->cx = addr1;
25920 + regs->ax = addr2;
25921 + regs->ip = addr2;
25922 + return 2;
25923 + }
25924 + } while (0);
25925 +
25926 + do { /* PaX: gcc trampoline emulation #2 */
25927 + unsigned char mov, jmp;
25928 + unsigned int addr1, addr2;
25929 +
25930 +#ifdef CONFIG_X86_64
25931 + if ((regs->ip + 9) >> 32)
25932 + break;
25933 +#endif
25934 +
25935 + err = get_user(mov, (unsigned char __user *)regs->ip);
25936 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25937 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25938 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25939 +
25940 + if (err)
25941 + break;
25942 +
25943 + if (mov == 0xB9 && jmp == 0xE9) {
25944 + regs->cx = addr1;
25945 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25946 + return 2;
25947 + }
25948 + } while (0);
25949 +
25950 + return 1; /* PaX in action */
25951 +}
25952 +
25953 +#ifdef CONFIG_X86_64
25954 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
25955 +{
25956 + int err;
25957 +
25958 + do { /* PaX: libffi trampoline emulation */
25959 + unsigned short mov1, mov2, jmp1;
25960 + unsigned char stcclc, jmp2;
25961 + unsigned long addr1, addr2;
25962 +
25963 + err = get_user(mov1, (unsigned short __user *)regs->ip);
25964 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25965 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25966 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25967 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
25968 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
25969 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
25970 +
25971 + if (err)
25972 + break;
25973 +
25974 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25975 + regs->r11 = addr1;
25976 + regs->r10 = addr2;
25977 + if (stcclc == 0xF8)
25978 + regs->flags &= ~X86_EFLAGS_CF;
25979 + else
25980 + regs->flags |= X86_EFLAGS_CF;
25981 + regs->ip = addr1;
25982 + return 2;
25983 + }
25984 + } while (0);
25985 +
25986 + do { /* PaX: gcc trampoline emulation #1 */
25987 + unsigned short mov1, mov2, jmp1;
25988 + unsigned char jmp2;
25989 + unsigned int addr1;
25990 + unsigned long addr2;
25991 +
25992 + err = get_user(mov1, (unsigned short __user *)regs->ip);
25993 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
25994 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
25995 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
25996 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
25997 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
25998 +
25999 + if (err)
26000 + break;
26001 +
26002 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
26003 + regs->r11 = addr1;
26004 + regs->r10 = addr2;
26005 + regs->ip = addr1;
26006 + return 2;
26007 + }
26008 + } while (0);
26009 +
26010 + do { /* PaX: gcc trampoline emulation #2 */
26011 + unsigned short mov1, mov2, jmp1;
26012 + unsigned char jmp2;
26013 + unsigned long addr1, addr2;
26014 +
26015 + err = get_user(mov1, (unsigned short __user *)regs->ip);
26016 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
26017 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
26018 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
26019 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
26020 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
26021 +
26022 + if (err)
26023 + break;
26024 +
26025 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
26026 + regs->r11 = addr1;
26027 + regs->r10 = addr2;
26028 + regs->ip = addr1;
26029 + return 2;
26030 + }
26031 + } while (0);
26032 +
26033 + return 1; /* PaX in action */
26034 +}
26035 +#endif
26036 +
26037 +/*
26038 + * PaX: decide what to do with offenders (regs->ip = fault address)
26039 + *
26040 + * returns 1 when task should be killed
26041 + * 2 when gcc trampoline was detected
26042 + */
26043 +static int pax_handle_fetch_fault(struct pt_regs *regs)
26044 +{
26045 + if (v8086_mode(regs))
26046 + return 1;
26047 +
26048 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
26049 + return 1;
26050 +
26051 +#ifdef CONFIG_X86_32
26052 + return pax_handle_fetch_fault_32(regs);
26053 +#else
26054 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
26055 + return pax_handle_fetch_fault_32(regs);
26056 + else
26057 + return pax_handle_fetch_fault_64(regs);
26058 +#endif
26059 +}
26060 +#endif
26061 +
26062 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26063 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
26064 +{
26065 + long i;
26066 +
26067 + printk(KERN_ERR "PAX: bytes at PC: ");
26068 + for (i = 0; i < 20; i++) {
26069 + unsigned char c;
26070 + if (get_user(c, (unsigned char __force_user *)pc+i))
26071 + printk(KERN_CONT "?? ");
26072 + else
26073 + printk(KERN_CONT "%02x ", c);
26074 + }
26075 + printk("\n");
26076 +
26077 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
26078 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
26079 + unsigned long c;
26080 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
26081 +#ifdef CONFIG_X86_32
26082 + printk(KERN_CONT "???????? ");
26083 +#else
26084 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
26085 + printk(KERN_CONT "???????? ???????? ");
26086 + else
26087 + printk(KERN_CONT "???????????????? ");
26088 +#endif
26089 + } else {
26090 +#ifdef CONFIG_X86_64
26091 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
26092 + printk(KERN_CONT "%08x ", (unsigned int)c);
26093 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
26094 + } else
26095 +#endif
26096 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
26097 + }
26098 + }
26099 + printk("\n");
26100 +}
26101 +#endif
26102 +
26103 +/**
26104 + * probe_kernel_write(): safely attempt to write to a location
26105 + * @dst: address to write to
26106 + * @src: pointer to the data that shall be written
26107 + * @size: size of the data chunk
26108 + *
26109 + * Safely write to address @dst from the buffer at @src. If a kernel fault
26110 + * happens, handle that and return -EFAULT.
26111 + */
26112 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
26113 +{
26114 + long ret;
26115 + mm_segment_t old_fs = get_fs();
26116 +
26117 + set_fs(KERNEL_DS);
26118 + pagefault_disable();
26119 + pax_open_kernel();
26120 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
26121 + pax_close_kernel();
26122 + pagefault_enable();
26123 + set_fs(old_fs);
26124 +
26125 + return ret ? -EFAULT : 0;
26126 +}
26127 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
26128 index 71da1bc..7a16bf4 100644
26129 --- a/arch/x86/mm/gup.c
26130 +++ b/arch/x86/mm/gup.c
26131 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
26132 addr = start;
26133 len = (unsigned long) nr_pages << PAGE_SHIFT;
26134 end = start + len;
26135 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
26136 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
26137 (void __user *)start, len)))
26138 return 0;
26139
26140 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
26141 index 63a6ba6..79abd7a 100644
26142 --- a/arch/x86/mm/highmem_32.c
26143 +++ b/arch/x86/mm/highmem_32.c
26144 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
26145 idx = type + KM_TYPE_NR*smp_processor_id();
26146 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
26147 BUG_ON(!pte_none(*(kmap_pte-idx)));
26148 +
26149 + pax_open_kernel();
26150 set_pte(kmap_pte-idx, mk_pte(page, prot));
26151 + pax_close_kernel();
26152
26153 return (void *)vaddr;
26154 }
26155 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
26156 index f46c3407..6ff9a26 100644
26157 --- a/arch/x86/mm/hugetlbpage.c
26158 +++ b/arch/x86/mm/hugetlbpage.c
26159 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
26160 struct hstate *h = hstate_file(file);
26161 struct mm_struct *mm = current->mm;
26162 struct vm_area_struct *vma;
26163 - unsigned long start_addr;
26164 + unsigned long start_addr, pax_task_size = TASK_SIZE;
26165 +
26166 +#ifdef CONFIG_PAX_SEGMEXEC
26167 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
26168 + pax_task_size = SEGMEXEC_TASK_SIZE;
26169 +#endif
26170 +
26171 + pax_task_size -= PAGE_SIZE;
26172
26173 if (len > mm->cached_hole_size) {
26174 - start_addr = mm->free_area_cache;
26175 + start_addr = mm->free_area_cache;
26176 } else {
26177 - start_addr = TASK_UNMAPPED_BASE;
26178 - mm->cached_hole_size = 0;
26179 + start_addr = mm->mmap_base;
26180 + mm->cached_hole_size = 0;
26181 }
26182
26183 full_search:
26184 @@ -281,26 +288,27 @@ full_search:
26185
26186 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
26187 /* At this point: (!vma || addr < vma->vm_end). */
26188 - if (TASK_SIZE - len < addr) {
26189 + if (pax_task_size - len < addr) {
26190 /*
26191 * Start a new search - just in case we missed
26192 * some holes.
26193 */
26194 - if (start_addr != TASK_UNMAPPED_BASE) {
26195 - start_addr = TASK_UNMAPPED_BASE;
26196 + if (start_addr != mm->mmap_base) {
26197 + start_addr = mm->mmap_base;
26198 mm->cached_hole_size = 0;
26199 goto full_search;
26200 }
26201 return -ENOMEM;
26202 }
26203 - if (!vma || addr + len <= vma->vm_start) {
26204 - mm->free_area_cache = addr + len;
26205 - return addr;
26206 - }
26207 + if (check_heap_stack_gap(vma, addr, len))
26208 + break;
26209 if (addr + mm->cached_hole_size < vma->vm_start)
26210 mm->cached_hole_size = vma->vm_start - addr;
26211 addr = ALIGN(vma->vm_end, huge_page_size(h));
26212 }
26213 +
26214 + mm->free_area_cache = addr + len;
26215 + return addr;
26216 }
26217
26218 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
26219 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
26220 {
26221 struct hstate *h = hstate_file(file);
26222 struct mm_struct *mm = current->mm;
26223 - struct vm_area_struct *vma, *prev_vma;
26224 - unsigned long base = mm->mmap_base, addr = addr0;
26225 + struct vm_area_struct *vma;
26226 + unsigned long base = mm->mmap_base, addr;
26227 unsigned long largest_hole = mm->cached_hole_size;
26228 - int first_time = 1;
26229
26230 /* don't allow allocations above current base */
26231 if (mm->free_area_cache > base)
26232 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
26233 largest_hole = 0;
26234 mm->free_area_cache = base;
26235 }
26236 -try_again:
26237 +
26238 /* make sure it can fit in the remaining address space */
26239 if (mm->free_area_cache < len)
26240 goto fail;
26241
26242 /* either no address requested or cant fit in requested address hole */
26243 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
26244 + addr = (mm->free_area_cache - len);
26245 do {
26246 + addr &= huge_page_mask(h);
26247 + vma = find_vma(mm, addr);
26248 /*
26249 * Lookup failure means no vma is above this address,
26250 * i.e. return with success:
26251 - */
26252 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
26253 - return addr;
26254 -
26255 - /*
26256 * new region fits between prev_vma->vm_end and
26257 * vma->vm_start, use it:
26258 */
26259 - if (addr + len <= vma->vm_start &&
26260 - (!prev_vma || (addr >= prev_vma->vm_end))) {
26261 + if (check_heap_stack_gap(vma, addr, len)) {
26262 /* remember the address as a hint for next time */
26263 - mm->cached_hole_size = largest_hole;
26264 - return (mm->free_area_cache = addr);
26265 - } else {
26266 - /* pull free_area_cache down to the first hole */
26267 - if (mm->free_area_cache == vma->vm_end) {
26268 - mm->free_area_cache = vma->vm_start;
26269 - mm->cached_hole_size = largest_hole;
26270 - }
26271 + mm->cached_hole_size = largest_hole;
26272 + return (mm->free_area_cache = addr);
26273 + }
26274 + /* pull free_area_cache down to the first hole */
26275 + if (mm->free_area_cache == vma->vm_end) {
26276 + mm->free_area_cache = vma->vm_start;
26277 + mm->cached_hole_size = largest_hole;
26278 }
26279
26280 /* remember the largest hole we saw so far */
26281 if (addr + largest_hole < vma->vm_start)
26282 - largest_hole = vma->vm_start - addr;
26283 + largest_hole = vma->vm_start - addr;
26284
26285 /* try just below the current vma->vm_start */
26286 - addr = (vma->vm_start - len) & huge_page_mask(h);
26287 - } while (len <= vma->vm_start);
26288 + addr = skip_heap_stack_gap(vma, len);
26289 + } while (!IS_ERR_VALUE(addr));
26290
26291 fail:
26292 /*
26293 - * if hint left us with no space for the requested
26294 - * mapping then try again:
26295 - */
26296 - if (first_time) {
26297 - mm->free_area_cache = base;
26298 - largest_hole = 0;
26299 - first_time = 0;
26300 - goto try_again;
26301 - }
26302 - /*
26303 * A failed mmap() very likely causes application failure,
26304 * so fall back to the bottom-up function here. This scenario
26305 * can happen with large stack limits and large mmap()
26306 * allocations.
26307 */
26308 - mm->free_area_cache = TASK_UNMAPPED_BASE;
26309 +
26310 +#ifdef CONFIG_PAX_SEGMEXEC
26311 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
26312 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
26313 + else
26314 +#endif
26315 +
26316 + mm->mmap_base = TASK_UNMAPPED_BASE;
26317 +
26318 +#ifdef CONFIG_PAX_RANDMMAP
26319 + if (mm->pax_flags & MF_PAX_RANDMMAP)
26320 + mm->mmap_base += mm->delta_mmap;
26321 +#endif
26322 +
26323 + mm->free_area_cache = mm->mmap_base;
26324 mm->cached_hole_size = ~0UL;
26325 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
26326 len, pgoff, flags);
26327 @@ -387,6 +393,7 @@ fail:
26328 /*
26329 * Restore the topdown base:
26330 */
26331 + mm->mmap_base = base;
26332 mm->free_area_cache = base;
26333 mm->cached_hole_size = ~0UL;
26334
26335 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
26336 struct hstate *h = hstate_file(file);
26337 struct mm_struct *mm = current->mm;
26338 struct vm_area_struct *vma;
26339 + unsigned long pax_task_size = TASK_SIZE;
26340
26341 if (len & ~huge_page_mask(h))
26342 return -EINVAL;
26343 - if (len > TASK_SIZE)
26344 +
26345 +#ifdef CONFIG_PAX_SEGMEXEC
26346 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
26347 + pax_task_size = SEGMEXEC_TASK_SIZE;
26348 +#endif
26349 +
26350 + pax_task_size -= PAGE_SIZE;
26351 +
26352 + if (len > pax_task_size)
26353 return -ENOMEM;
26354
26355 if (flags & MAP_FIXED) {
26356 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
26357 if (addr) {
26358 addr = ALIGN(addr, huge_page_size(h));
26359 vma = find_vma(mm, addr);
26360 - if (TASK_SIZE - len >= addr &&
26361 - (!vma || addr + len <= vma->vm_start))
26362 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
26363 return addr;
26364 }
26365 if (mm->get_unmapped_area == arch_get_unmapped_area)
26366 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
26367 index 73ffd55..f61c2a7 100644
26368 --- a/arch/x86/mm/init.c
26369 +++ b/arch/x86/mm/init.c
26370 @@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
26371 * cause a hotspot and fill up ZONE_DMA. The page tables
26372 * need roughly 0.5KB per GB.
26373 */
26374 -#ifdef CONFIG_X86_32
26375 - start = 0x7000;
26376 -#else
26377 - start = 0x8000;
26378 -#endif
26379 + start = 0x100000;
26380 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
26381 tables, PAGE_SIZE);
26382 if (e820_table_start == -1UL)
26383 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
26384 #endif
26385
26386 set_nx();
26387 - if (nx_enabled)
26388 + if (nx_enabled && cpu_has_nx)
26389 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
26390
26391 /* Enable PSE if available */
26392 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
26393 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
26394 * mmio resources as well as potential bios/acpi data regions.
26395 */
26396 +
26397 int devmem_is_allowed(unsigned long pagenr)
26398 {
26399 +#ifdef CONFIG_GRKERNSEC_KMEM
26400 + /* allow BDA */
26401 + if (!pagenr)
26402 + return 1;
26403 + /* allow EBDA */
26404 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
26405 + return 1;
26406 + /* allow ISA/video mem */
26407 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
26408 + return 1;
26409 + /* throw out everything else below 1MB */
26410 + if (pagenr <= 256)
26411 + return 0;
26412 +#else
26413 if (pagenr <= 256)
26414 return 1;
26415 +#endif
26416 +
26417 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
26418 return 0;
26419 if (!page_is_ram(pagenr))
26420 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
26421
26422 void free_initmem(void)
26423 {
26424 +
26425 +#ifdef CONFIG_PAX_KERNEXEC
26426 +#ifdef CONFIG_X86_32
26427 + /* PaX: limit KERNEL_CS to actual size */
26428 + unsigned long addr, limit;
26429 + struct desc_struct d;
26430 + int cpu;
26431 +
26432 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
26433 + limit = (limit - 1UL) >> PAGE_SHIFT;
26434 +
26435 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
26436 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26437 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
26438 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
26439 + }
26440 +
26441 + /* PaX: make KERNEL_CS read-only */
26442 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
26443 + if (!paravirt_enabled())
26444 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
26445 +/*
26446 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
26447 + pgd = pgd_offset_k(addr);
26448 + pud = pud_offset(pgd, addr);
26449 + pmd = pmd_offset(pud, addr);
26450 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26451 + }
26452 +*/
26453 +#ifdef CONFIG_X86_PAE
26454 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
26455 +/*
26456 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
26457 + pgd = pgd_offset_k(addr);
26458 + pud = pud_offset(pgd, addr);
26459 + pmd = pmd_offset(pud, addr);
26460 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
26461 + }
26462 +*/
26463 +#endif
26464 +
26465 +#ifdef CONFIG_MODULES
26466 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
26467 +#endif
26468 +
26469 +#else
26470 + pgd_t *pgd;
26471 + pud_t *pud;
26472 + pmd_t *pmd;
26473 + unsigned long addr, end;
26474 +
26475 + /* PaX: make kernel code/rodata read-only, rest non-executable */
26476 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
26477 + pgd = pgd_offset_k(addr);
26478 + pud = pud_offset(pgd, addr);
26479 + pmd = pmd_offset(pud, addr);
26480 + if (!pmd_present(*pmd))
26481 + continue;
26482 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
26483 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26484 + else
26485 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
26486 + }
26487 +
26488 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
26489 + end = addr + KERNEL_IMAGE_SIZE;
26490 + for (; addr < end; addr += PMD_SIZE) {
26491 + pgd = pgd_offset_k(addr);
26492 + pud = pud_offset(pgd, addr);
26493 + pmd = pmd_offset(pud, addr);
26494 + if (!pmd_present(*pmd))
26495 + continue;
26496 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
26497 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26498 + }
26499 +#endif
26500 +
26501 + flush_tlb_all();
26502 +#endif
26503 +
26504 free_init_pages("unused kernel memory",
26505 (unsigned long)(&__init_begin),
26506 (unsigned long)(&__init_end));
26507 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
26508 index 30938c1..bda3d5d 100644
26509 --- a/arch/x86/mm/init_32.c
26510 +++ b/arch/x86/mm/init_32.c
26511 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
26512 }
26513
26514 /*
26515 - * Creates a middle page table and puts a pointer to it in the
26516 - * given global directory entry. This only returns the gd entry
26517 - * in non-PAE compilation mode, since the middle layer is folded.
26518 - */
26519 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
26520 -{
26521 - pud_t *pud;
26522 - pmd_t *pmd_table;
26523 -
26524 -#ifdef CONFIG_X86_PAE
26525 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
26526 - if (after_bootmem)
26527 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
26528 - else
26529 - pmd_table = (pmd_t *)alloc_low_page();
26530 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
26531 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
26532 - pud = pud_offset(pgd, 0);
26533 - BUG_ON(pmd_table != pmd_offset(pud, 0));
26534 -
26535 - return pmd_table;
26536 - }
26537 -#endif
26538 - pud = pud_offset(pgd, 0);
26539 - pmd_table = pmd_offset(pud, 0);
26540 -
26541 - return pmd_table;
26542 -}
26543 -
26544 -/*
26545 * Create a page table and place a pointer to it in a middle page
26546 * directory entry:
26547 */
26548 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
26549 page_table = (pte_t *)alloc_low_page();
26550
26551 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
26552 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26553 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
26554 +#else
26555 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
26556 +#endif
26557 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
26558 }
26559
26560 return pte_offset_kernel(pmd, 0);
26561 }
26562
26563 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
26564 +{
26565 + pud_t *pud;
26566 + pmd_t *pmd_table;
26567 +
26568 + pud = pud_offset(pgd, 0);
26569 + pmd_table = pmd_offset(pud, 0);
26570 +
26571 + return pmd_table;
26572 +}
26573 +
26574 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
26575 {
26576 int pgd_idx = pgd_index(vaddr);
26577 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26578 int pgd_idx, pmd_idx;
26579 unsigned long vaddr;
26580 pgd_t *pgd;
26581 + pud_t *pud;
26582 pmd_t *pmd;
26583 pte_t *pte = NULL;
26584
26585 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26586 pgd = pgd_base + pgd_idx;
26587
26588 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
26589 - pmd = one_md_table_init(pgd);
26590 - pmd = pmd + pmd_index(vaddr);
26591 + pud = pud_offset(pgd, vaddr);
26592 + pmd = pmd_offset(pud, vaddr);
26593 +
26594 +#ifdef CONFIG_X86_PAE
26595 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
26596 +#endif
26597 +
26598 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
26599 pmd++, pmd_idx++) {
26600 pte = page_table_kmap_check(one_page_table_init(pmd),
26601 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26602 }
26603 }
26604
26605 -static inline int is_kernel_text(unsigned long addr)
26606 +static inline int is_kernel_text(unsigned long start, unsigned long end)
26607 {
26608 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
26609 - return 1;
26610 - return 0;
26611 + if ((start > ktla_ktva((unsigned long)_etext) ||
26612 + end <= ktla_ktva((unsigned long)_stext)) &&
26613 + (start > ktla_ktva((unsigned long)_einittext) ||
26614 + end <= ktla_ktva((unsigned long)_sinittext)) &&
26615 +
26616 +#ifdef CONFIG_ACPI_SLEEP
26617 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
26618 +#endif
26619 +
26620 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
26621 + return 0;
26622 + return 1;
26623 }
26624
26625 /*
26626 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
26627 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
26628 unsigned long start_pfn, end_pfn;
26629 pgd_t *pgd_base = swapper_pg_dir;
26630 - int pgd_idx, pmd_idx, pte_ofs;
26631 + unsigned int pgd_idx, pmd_idx, pte_ofs;
26632 unsigned long pfn;
26633 pgd_t *pgd;
26634 + pud_t *pud;
26635 pmd_t *pmd;
26636 pte_t *pte;
26637 unsigned pages_2m, pages_4k;
26638 @@ -278,8 +279,13 @@ repeat:
26639 pfn = start_pfn;
26640 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26641 pgd = pgd_base + pgd_idx;
26642 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
26643 - pmd = one_md_table_init(pgd);
26644 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
26645 + pud = pud_offset(pgd, 0);
26646 + pmd = pmd_offset(pud, 0);
26647 +
26648 +#ifdef CONFIG_X86_PAE
26649 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
26650 +#endif
26651
26652 if (pfn >= end_pfn)
26653 continue;
26654 @@ -291,14 +297,13 @@ repeat:
26655 #endif
26656 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
26657 pmd++, pmd_idx++) {
26658 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
26659 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
26660
26661 /*
26662 * Map with big pages if possible, otherwise
26663 * create normal page tables:
26664 */
26665 if (use_pse) {
26666 - unsigned int addr2;
26667 pgprot_t prot = PAGE_KERNEL_LARGE;
26668 /*
26669 * first pass will use the same initial
26670 @@ -308,11 +313,7 @@ repeat:
26671 __pgprot(PTE_IDENT_ATTR |
26672 _PAGE_PSE);
26673
26674 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
26675 - PAGE_OFFSET + PAGE_SIZE-1;
26676 -
26677 - if (is_kernel_text(addr) ||
26678 - is_kernel_text(addr2))
26679 + if (is_kernel_text(address, address + PMD_SIZE))
26680 prot = PAGE_KERNEL_LARGE_EXEC;
26681
26682 pages_2m++;
26683 @@ -329,7 +330,7 @@ repeat:
26684 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26685 pte += pte_ofs;
26686 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
26687 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
26688 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
26689 pgprot_t prot = PAGE_KERNEL;
26690 /*
26691 * first pass will use the same initial
26692 @@ -337,7 +338,7 @@ repeat:
26693 */
26694 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
26695
26696 - if (is_kernel_text(addr))
26697 + if (is_kernel_text(address, address + PAGE_SIZE))
26698 prot = PAGE_KERNEL_EXEC;
26699
26700 pages_4k++;
26701 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
26702
26703 pud = pud_offset(pgd, va);
26704 pmd = pmd_offset(pud, va);
26705 - if (!pmd_present(*pmd))
26706 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
26707 break;
26708
26709 pte = pte_offset_kernel(pmd, va);
26710 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
26711
26712 static void __init pagetable_init(void)
26713 {
26714 - pgd_t *pgd_base = swapper_pg_dir;
26715 -
26716 - permanent_kmaps_init(pgd_base);
26717 + permanent_kmaps_init(swapper_pg_dir);
26718 }
26719
26720 #ifdef CONFIG_ACPI_SLEEP
26721 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
26722 * ACPI suspend needs this for resume, because things like the intel-agp
26723 * driver might have split up a kernel 4MB mapping.
26724 */
26725 -char swsusp_pg_dir[PAGE_SIZE]
26726 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
26727 __attribute__ ((aligned(PAGE_SIZE)));
26728
26729 static inline void save_pg_dir(void)
26730 {
26731 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
26732 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
26733 }
26734 #else /* !CONFIG_ACPI_SLEEP */
26735 static inline void save_pg_dir(void)
26736 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
26737 flush_tlb_all();
26738 }
26739
26740 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26741 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26742 EXPORT_SYMBOL_GPL(__supported_pte_mask);
26743
26744 /* user-defined highmem size */
26745 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
26746 * Initialize the boot-time allocator (with low memory only):
26747 */
26748 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
26749 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
26750 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
26751 PAGE_SIZE);
26752 if (bootmap == -1L)
26753 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
26754 @@ -864,6 +863,12 @@ void __init mem_init(void)
26755
26756 pci_iommu_alloc();
26757
26758 +#ifdef CONFIG_PAX_PER_CPU_PGD
26759 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26760 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26761 + KERNEL_PGD_PTRS);
26762 +#endif
26763 +
26764 #ifdef CONFIG_FLATMEM
26765 BUG_ON(!mem_map);
26766 #endif
26767 @@ -881,7 +886,7 @@ void __init mem_init(void)
26768 set_highmem_pages_init();
26769
26770 codesize = (unsigned long) &_etext - (unsigned long) &_text;
26771 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
26772 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
26773 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
26774
26775 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
26776 @@ -923,10 +928,10 @@ void __init mem_init(void)
26777 ((unsigned long)&__init_end -
26778 (unsigned long)&__init_begin) >> 10,
26779
26780 - (unsigned long)&_etext, (unsigned long)&_edata,
26781 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
26782 + (unsigned long)&_sdata, (unsigned long)&_edata,
26783 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
26784
26785 - (unsigned long)&_text, (unsigned long)&_etext,
26786 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
26787 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
26788
26789 /*
26790 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
26791 if (!kernel_set_to_readonly)
26792 return;
26793
26794 + start = ktla_ktva(start);
26795 pr_debug("Set kernel text: %lx - %lx for read write\n",
26796 start, start+size);
26797
26798 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
26799 if (!kernel_set_to_readonly)
26800 return;
26801
26802 + start = ktla_ktva(start);
26803 pr_debug("Set kernel text: %lx - %lx for read only\n",
26804 start, start+size);
26805
26806 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
26807 unsigned long start = PFN_ALIGN(_text);
26808 unsigned long size = PFN_ALIGN(_etext) - start;
26809
26810 + start = ktla_ktva(start);
26811 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
26812 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
26813 size >> 10);
26814 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
26815 index 7d095ad..25d2549 100644
26816 --- a/arch/x86/mm/init_64.c
26817 +++ b/arch/x86/mm/init_64.c
26818 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
26819 pmd = fill_pmd(pud, vaddr);
26820 pte = fill_pte(pmd, vaddr);
26821
26822 + pax_open_kernel();
26823 set_pte(pte, new_pte);
26824 + pax_close_kernel();
26825
26826 /*
26827 * It's enough to flush this one mapping.
26828 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
26829 pgd = pgd_offset_k((unsigned long)__va(phys));
26830 if (pgd_none(*pgd)) {
26831 pud = (pud_t *) spp_getpage();
26832 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
26833 - _PAGE_USER));
26834 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
26835 }
26836 pud = pud_offset(pgd, (unsigned long)__va(phys));
26837 if (pud_none(*pud)) {
26838 pmd = (pmd_t *) spp_getpage();
26839 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
26840 - _PAGE_USER));
26841 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
26842 }
26843 pmd = pmd_offset(pud, phys);
26844 BUG_ON(!pmd_none(*pmd));
26845 @@ -675,6 +675,12 @@ void __init mem_init(void)
26846
26847 pci_iommu_alloc();
26848
26849 +#ifdef CONFIG_PAX_PER_CPU_PGD
26850 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26851 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26852 + KERNEL_PGD_PTRS);
26853 +#endif
26854 +
26855 /* clear_bss() already clear the empty_zero_page */
26856
26857 reservedpages = 0;
26858 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
26859 static struct vm_area_struct gate_vma = {
26860 .vm_start = VSYSCALL_START,
26861 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
26862 - .vm_page_prot = PAGE_READONLY_EXEC,
26863 - .vm_flags = VM_READ | VM_EXEC
26864 + .vm_page_prot = PAGE_READONLY,
26865 + .vm_flags = VM_READ
26866 };
26867
26868 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
26869 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
26870
26871 const char *arch_vma_name(struct vm_area_struct *vma)
26872 {
26873 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26874 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26875 return "[vdso]";
26876 if (vma == &gate_vma)
26877 return "[vsyscall]";
26878 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
26879 index 84e236c..69bd3f6 100644
26880 --- a/arch/x86/mm/iomap_32.c
26881 +++ b/arch/x86/mm/iomap_32.c
26882 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
26883 debug_kmap_atomic(type);
26884 idx = type + KM_TYPE_NR * smp_processor_id();
26885 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
26886 +
26887 + pax_open_kernel();
26888 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
26889 + pax_close_kernel();
26890 +
26891 arch_flush_lazy_mmu_mode();
26892
26893 return (void *)vaddr;
26894 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
26895 index 2feb9bd..ab91e7b 100644
26896 --- a/arch/x86/mm/ioremap.c
26897 +++ b/arch/x86/mm/ioremap.c
26898 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
26899 * Second special case: Some BIOSen report the PC BIOS
26900 * area (640->1Mb) as ram even though it is not.
26901 */
26902 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
26903 - pagenr < (BIOS_END >> PAGE_SHIFT))
26904 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
26905 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
26906 return 0;
26907
26908 for (i = 0; i < e820.nr_map; i++) {
26909 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
26910 /*
26911 * Don't allow anybody to remap normal RAM that we're using..
26912 */
26913 - for (pfn = phys_addr >> PAGE_SHIFT;
26914 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
26915 - pfn++) {
26916 -
26917 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
26918 int is_ram = page_is_ram(pfn);
26919
26920 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
26921 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
26922 return NULL;
26923 WARN_ON_ONCE(is_ram);
26924 }
26925 @@ -378,6 +375,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
26926
26927 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
26928 if (page_is_ram(start >> PAGE_SHIFT))
26929 +#ifdef CONFIG_HIGHMEM
26930 + if ((start >> PAGE_SHIFT) < max_low_pfn)
26931 +#endif
26932 return __va(phys);
26933
26934 addr = (void __force *)ioremap_default(start, PAGE_SIZE);
26935 @@ -407,7 +407,7 @@ static int __init early_ioremap_debug_setup(char *str)
26936 early_param("early_ioremap_debug", early_ioremap_debug_setup);
26937
26938 static __initdata int after_paging_init;
26939 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
26940 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
26941
26942 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
26943 {
26944 @@ -439,8 +439,7 @@ void __init early_ioremap_init(void)
26945 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
26946
26947 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
26948 - memset(bm_pte, 0, sizeof(bm_pte));
26949 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
26950 + pmd_populate_user(&init_mm, pmd, bm_pte);
26951
26952 /*
26953 * The boot-ioremap range spans multiple pmds, for which
26954 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
26955 index 8cc1833..1abbc5b 100644
26956 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
26957 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
26958 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
26959 * memory (e.g. tracked pages)? For now, we need this to avoid
26960 * invoking kmemcheck for PnP BIOS calls.
26961 */
26962 - if (regs->flags & X86_VM_MASK)
26963 + if (v8086_mode(regs))
26964 return false;
26965 - if (regs->cs != __KERNEL_CS)
26966 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
26967 return false;
26968
26969 pte = kmemcheck_pte_lookup(address);
26970 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
26971 index c9e57af..07a321b 100644
26972 --- a/arch/x86/mm/mmap.c
26973 +++ b/arch/x86/mm/mmap.c
26974 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
26975 * Leave an at least ~128 MB hole with possible stack randomization.
26976 */
26977 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
26978 -#define MAX_GAP (TASK_SIZE/6*5)
26979 +#define MAX_GAP (pax_task_size/6*5)
26980
26981 /*
26982 * True on X86_32 or when emulating IA32 on X86_64
26983 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
26984 return rnd << PAGE_SHIFT;
26985 }
26986
26987 -static unsigned long mmap_base(void)
26988 +static unsigned long mmap_base(struct mm_struct *mm)
26989 {
26990 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
26991 + unsigned long pax_task_size = TASK_SIZE;
26992 +
26993 +#ifdef CONFIG_PAX_SEGMEXEC
26994 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
26995 + pax_task_size = SEGMEXEC_TASK_SIZE;
26996 +#endif
26997
26998 if (gap < MIN_GAP)
26999 gap = MIN_GAP;
27000 else if (gap > MAX_GAP)
27001 gap = MAX_GAP;
27002
27003 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
27004 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
27005 }
27006
27007 /*
27008 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
27009 * does, but not when emulating X86_32
27010 */
27011 -static unsigned long mmap_legacy_base(void)
27012 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
27013 {
27014 - if (mmap_is_ia32())
27015 + if (mmap_is_ia32()) {
27016 +
27017 +#ifdef CONFIG_PAX_SEGMEXEC
27018 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
27019 + return SEGMEXEC_TASK_UNMAPPED_BASE;
27020 + else
27021 +#endif
27022 +
27023 return TASK_UNMAPPED_BASE;
27024 - else
27025 + } else
27026 return TASK_UNMAPPED_BASE + mmap_rnd();
27027 }
27028
27029 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
27030 void arch_pick_mmap_layout(struct mm_struct *mm)
27031 {
27032 if (mmap_is_legacy()) {
27033 - mm->mmap_base = mmap_legacy_base();
27034 + mm->mmap_base = mmap_legacy_base(mm);
27035 +
27036 +#ifdef CONFIG_PAX_RANDMMAP
27037 + if (mm->pax_flags & MF_PAX_RANDMMAP)
27038 + mm->mmap_base += mm->delta_mmap;
27039 +#endif
27040 +
27041 mm->get_unmapped_area = arch_get_unmapped_area;
27042 mm->unmap_area = arch_unmap_area;
27043 } else {
27044 - mm->mmap_base = mmap_base();
27045 + mm->mmap_base = mmap_base(mm);
27046 +
27047 +#ifdef CONFIG_PAX_RANDMMAP
27048 + if (mm->pax_flags & MF_PAX_RANDMMAP)
27049 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
27050 +#endif
27051 +
27052 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
27053 mm->unmap_area = arch_unmap_area_topdown;
27054 }
27055 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
27056 index 132772a..b961f11 100644
27057 --- a/arch/x86/mm/mmio-mod.c
27058 +++ b/arch/x86/mm/mmio-mod.c
27059 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
27060 break;
27061 default:
27062 {
27063 - unsigned char *ip = (unsigned char *)instptr;
27064 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
27065 my_trace->opcode = MMIO_UNKNOWN_OP;
27066 my_trace->width = 0;
27067 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
27068 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
27069 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
27070 void __iomem *addr)
27071 {
27072 - static atomic_t next_id;
27073 + static atomic_unchecked_t next_id;
27074 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
27075 /* These are page-unaligned. */
27076 struct mmiotrace_map map = {
27077 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
27078 .private = trace
27079 },
27080 .phys = offset,
27081 - .id = atomic_inc_return(&next_id)
27082 + .id = atomic_inc_return_unchecked(&next_id)
27083 };
27084 map.map_id = trace->id;
27085
27086 diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
27087 index d253006..e56dd6a 100644
27088 --- a/arch/x86/mm/numa_32.c
27089 +++ b/arch/x86/mm/numa_32.c
27090 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
27091 }
27092 #endif
27093
27094 -extern unsigned long find_max_low_pfn(void);
27095 extern unsigned long highend_pfn, highstart_pfn;
27096
27097 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
27098 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
27099 index e1d1069..2251ff3 100644
27100 --- a/arch/x86/mm/pageattr-test.c
27101 +++ b/arch/x86/mm/pageattr-test.c
27102 @@ -36,7 +36,7 @@ enum {
27103
27104 static int pte_testbit(pte_t pte)
27105 {
27106 - return pte_flags(pte) & _PAGE_UNUSED1;
27107 + return pte_flags(pte) & _PAGE_CPA_TEST;
27108 }
27109
27110 struct split_state {
27111 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
27112 index dd38bfb..b72c63e 100644
27113 --- a/arch/x86/mm/pageattr.c
27114 +++ b/arch/x86/mm/pageattr.c
27115 @@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
27116 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
27117 */
27118 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
27119 - pgprot_val(forbidden) |= _PAGE_NX;
27120 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
27121
27122 /*
27123 * The kernel text needs to be executable for obvious reasons
27124 * Does not cover __inittext since that is gone later on. On
27125 * 64bit we do not enforce !NX on the low mapping
27126 */
27127 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
27128 - pgprot_val(forbidden) |= _PAGE_NX;
27129 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
27130 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
27131
27132 +#ifdef CONFIG_DEBUG_RODATA
27133 /*
27134 * The .rodata section needs to be read-only. Using the pfn
27135 * catches all aliases.
27136 @@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
27137 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
27138 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
27139 pgprot_val(forbidden) |= _PAGE_RW;
27140 +#endif
27141 +
27142 +#ifdef CONFIG_PAX_KERNEXEC
27143 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
27144 + pgprot_val(forbidden) |= _PAGE_RW;
27145 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
27146 + }
27147 +#endif
27148
27149 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
27150
27151 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
27152 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
27153 {
27154 /* change init_mm */
27155 + pax_open_kernel();
27156 set_pte_atomic(kpte, pte);
27157 +
27158 #ifdef CONFIG_X86_32
27159 if (!SHARED_KERNEL_PMD) {
27160 +
27161 +#ifdef CONFIG_PAX_PER_CPU_PGD
27162 + unsigned long cpu;
27163 +#else
27164 struct page *page;
27165 +#endif
27166
27167 +#ifdef CONFIG_PAX_PER_CPU_PGD
27168 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
27169 + pgd_t *pgd = get_cpu_pgd(cpu);
27170 +#else
27171 list_for_each_entry(page, &pgd_list, lru) {
27172 - pgd_t *pgd;
27173 + pgd_t *pgd = (pgd_t *)page_address(page);
27174 +#endif
27175 +
27176 pud_t *pud;
27177 pmd_t *pmd;
27178
27179 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
27180 + pgd += pgd_index(address);
27181 pud = pud_offset(pgd, address);
27182 pmd = pmd_offset(pud, address);
27183 set_pte_atomic((pte_t *)pmd, pte);
27184 }
27185 }
27186 #endif
27187 + pax_close_kernel();
27188 }
27189
27190 static int
27191 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
27192 index e78cd0e..de0a817 100644
27193 --- a/arch/x86/mm/pat.c
27194 +++ b/arch/x86/mm/pat.c
27195 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
27196
27197 conflict:
27198 printk(KERN_INFO "%s:%d conflicting memory types "
27199 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
27200 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
27201 new->end, cattr_name(new->type), cattr_name(entry->type));
27202 return -EBUSY;
27203 }
27204 @@ -559,7 +559,7 @@ unlock_ret:
27205
27206 if (err) {
27207 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
27208 - current->comm, current->pid, start, end);
27209 + current->comm, task_pid_nr(current), start, end);
27210 }
27211
27212 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
27213 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27214 while (cursor < to) {
27215 if (!devmem_is_allowed(pfn)) {
27216 printk(KERN_INFO
27217 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27218 - current->comm, from, to);
27219 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
27220 + current->comm, from, to, cursor);
27221 return 0;
27222 }
27223 cursor += PAGE_SIZE;
27224 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
27225 printk(KERN_INFO
27226 "%s:%d ioremap_change_attr failed %s "
27227 "for %Lx-%Lx\n",
27228 - current->comm, current->pid,
27229 + current->comm, task_pid_nr(current),
27230 cattr_name(flags),
27231 base, (unsigned long long)(base + size));
27232 return -EINVAL;
27233 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
27234 free_memtype(paddr, paddr + size);
27235 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
27236 " for %Lx-%Lx, got %s\n",
27237 - current->comm, current->pid,
27238 + current->comm, task_pid_nr(current),
27239 cattr_name(want_flags),
27240 (unsigned long long)paddr,
27241 (unsigned long long)(paddr + size),
27242 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
27243 index df3d5c8..c2223e1 100644
27244 --- a/arch/x86/mm/pf_in.c
27245 +++ b/arch/x86/mm/pf_in.c
27246 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
27247 int i;
27248 enum reason_type rv = OTHERS;
27249
27250 - p = (unsigned char *)ins_addr;
27251 + p = (unsigned char *)ktla_ktva(ins_addr);
27252 p += skip_prefix(p, &prf);
27253 p += get_opcode(p, &opcode);
27254
27255 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
27256 struct prefix_bits prf;
27257 int i;
27258
27259 - p = (unsigned char *)ins_addr;
27260 + p = (unsigned char *)ktla_ktva(ins_addr);
27261 p += skip_prefix(p, &prf);
27262 p += get_opcode(p, &opcode);
27263
27264 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
27265 struct prefix_bits prf;
27266 int i;
27267
27268 - p = (unsigned char *)ins_addr;
27269 + p = (unsigned char *)ktla_ktva(ins_addr);
27270 p += skip_prefix(p, &prf);
27271 p += get_opcode(p, &opcode);
27272
27273 @@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
27274 int i;
27275 unsigned long rv;
27276
27277 - p = (unsigned char *)ins_addr;
27278 + p = (unsigned char *)ktla_ktva(ins_addr);
27279 p += skip_prefix(p, &prf);
27280 p += get_opcode(p, &opcode);
27281 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
27282 @@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
27283 int i;
27284 unsigned long rv;
27285
27286 - p = (unsigned char *)ins_addr;
27287 + p = (unsigned char *)ktla_ktva(ins_addr);
27288 p += skip_prefix(p, &prf);
27289 p += get_opcode(p, &opcode);
27290 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
27291 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
27292 index e0e6fad..c56b495 100644
27293 --- a/arch/x86/mm/pgtable.c
27294 +++ b/arch/x86/mm/pgtable.c
27295 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
27296 list_del(&page->lru);
27297 }
27298
27299 -#define UNSHARED_PTRS_PER_PGD \
27300 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
27301 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27302 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
27303
27304 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
27305 +{
27306 + while (count--)
27307 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
27308 +}
27309 +#endif
27310 +
27311 +#ifdef CONFIG_PAX_PER_CPU_PGD
27312 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
27313 +{
27314 + while (count--)
27315 +
27316 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27317 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
27318 +#else
27319 + *dst++ = *src++;
27320 +#endif
27321 +
27322 +}
27323 +#endif
27324 +
27325 +#ifdef CONFIG_X86_64
27326 +#define pxd_t pud_t
27327 +#define pyd_t pgd_t
27328 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
27329 +#define pxd_free(mm, pud) pud_free((mm), (pud))
27330 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
27331 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
27332 +#define PYD_SIZE PGDIR_SIZE
27333 +#else
27334 +#define pxd_t pmd_t
27335 +#define pyd_t pud_t
27336 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
27337 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
27338 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
27339 +#define pyd_offset(mm, address) pud_offset((mm), (address))
27340 +#define PYD_SIZE PUD_SIZE
27341 +#endif
27342 +
27343 +#ifdef CONFIG_PAX_PER_CPU_PGD
27344 +static inline void pgd_ctor(pgd_t *pgd) {}
27345 +static inline void pgd_dtor(pgd_t *pgd) {}
27346 +#else
27347 static void pgd_ctor(pgd_t *pgd)
27348 {
27349 /* If the pgd points to a shared pagetable level (either the
27350 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
27351 pgd_list_del(pgd);
27352 spin_unlock_irqrestore(&pgd_lock, flags);
27353 }
27354 +#endif
27355
27356 /*
27357 * List of all pgd's needed for non-PAE so it can invalidate entries
27358 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
27359 * -- wli
27360 */
27361
27362 -#ifdef CONFIG_X86_PAE
27363 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
27364 /*
27365 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
27366 * updating the top-level pagetable entries to guarantee the
27367 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
27368 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
27369 * and initialize the kernel pmds here.
27370 */
27371 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
27372 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
27373
27374 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
27375 {
27376 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
27377 */
27378 flush_tlb_mm(mm);
27379 }
27380 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
27381 +#define PREALLOCATED_PXDS USER_PGD_PTRS
27382 #else /* !CONFIG_X86_PAE */
27383
27384 /* No need to prepopulate any pagetable entries in non-PAE modes. */
27385 -#define PREALLOCATED_PMDS 0
27386 +#define PREALLOCATED_PXDS 0
27387
27388 #endif /* CONFIG_X86_PAE */
27389
27390 -static void free_pmds(pmd_t *pmds[])
27391 +static void free_pxds(pxd_t *pxds[])
27392 {
27393 int i;
27394
27395 - for(i = 0; i < PREALLOCATED_PMDS; i++)
27396 - if (pmds[i])
27397 - free_page((unsigned long)pmds[i]);
27398 + for(i = 0; i < PREALLOCATED_PXDS; i++)
27399 + if (pxds[i])
27400 + free_page((unsigned long)pxds[i]);
27401 }
27402
27403 -static int preallocate_pmds(pmd_t *pmds[])
27404 +static int preallocate_pxds(pxd_t *pxds[])
27405 {
27406 int i;
27407 bool failed = false;
27408
27409 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
27410 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
27411 - if (pmd == NULL)
27412 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
27413 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
27414 + if (pxd == NULL)
27415 failed = true;
27416 - pmds[i] = pmd;
27417 + pxds[i] = pxd;
27418 }
27419
27420 if (failed) {
27421 - free_pmds(pmds);
27422 + free_pxds(pxds);
27423 return -ENOMEM;
27424 }
27425
27426 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
27427 * preallocate which never got a corresponding vma will need to be
27428 * freed manually.
27429 */
27430 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
27431 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
27432 {
27433 int i;
27434
27435 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
27436 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
27437 pgd_t pgd = pgdp[i];
27438
27439 if (pgd_val(pgd) != 0) {
27440 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
27441 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
27442
27443 - pgdp[i] = native_make_pgd(0);
27444 + set_pgd(pgdp + i, native_make_pgd(0));
27445
27446 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
27447 - pmd_free(mm, pmd);
27448 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
27449 + pxd_free(mm, pxd);
27450 }
27451 }
27452 }
27453
27454 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
27455 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
27456 {
27457 - pud_t *pud;
27458 + pyd_t *pyd;
27459 unsigned long addr;
27460 int i;
27461
27462 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
27463 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
27464 return;
27465
27466 - pud = pud_offset(pgd, 0);
27467 +#ifdef CONFIG_X86_64
27468 + pyd = pyd_offset(mm, 0L);
27469 +#else
27470 + pyd = pyd_offset(pgd, 0L);
27471 +#endif
27472
27473 - for (addr = i = 0; i < PREALLOCATED_PMDS;
27474 - i++, pud++, addr += PUD_SIZE) {
27475 - pmd_t *pmd = pmds[i];
27476 + for (addr = i = 0; i < PREALLOCATED_PXDS;
27477 + i++, pyd++, addr += PYD_SIZE) {
27478 + pxd_t *pxd = pxds[i];
27479
27480 if (i >= KERNEL_PGD_BOUNDARY)
27481 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
27482 - sizeof(pmd_t) * PTRS_PER_PMD);
27483 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
27484 + sizeof(pxd_t) * PTRS_PER_PMD);
27485
27486 - pud_populate(mm, pud, pmd);
27487 + pyd_populate(mm, pyd, pxd);
27488 }
27489 }
27490
27491 pgd_t *pgd_alloc(struct mm_struct *mm)
27492 {
27493 pgd_t *pgd;
27494 - pmd_t *pmds[PREALLOCATED_PMDS];
27495 + pxd_t *pxds[PREALLOCATED_PXDS];
27496 +
27497 unsigned long flags;
27498
27499 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
27500 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27501
27502 mm->pgd = pgd;
27503
27504 - if (preallocate_pmds(pmds) != 0)
27505 + if (preallocate_pxds(pxds) != 0)
27506 goto out_free_pgd;
27507
27508 if (paravirt_pgd_alloc(mm) != 0)
27509 - goto out_free_pmds;
27510 + goto out_free_pxds;
27511
27512 /*
27513 * Make sure that pre-populating the pmds is atomic with
27514 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27515 spin_lock_irqsave(&pgd_lock, flags);
27516
27517 pgd_ctor(pgd);
27518 - pgd_prepopulate_pmd(mm, pgd, pmds);
27519 + pgd_prepopulate_pxd(mm, pgd, pxds);
27520
27521 spin_unlock_irqrestore(&pgd_lock, flags);
27522
27523 return pgd;
27524
27525 -out_free_pmds:
27526 - free_pmds(pmds);
27527 +out_free_pxds:
27528 + free_pxds(pxds);
27529 out_free_pgd:
27530 free_page((unsigned long)pgd);
27531 out:
27532 @@ -287,7 +338,7 @@ out:
27533
27534 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
27535 {
27536 - pgd_mop_up_pmds(mm, pgd);
27537 + pgd_mop_up_pxds(mm, pgd);
27538 pgd_dtor(pgd);
27539 paravirt_pgd_free(mm, pgd);
27540 free_page((unsigned long)pgd);
27541 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
27542 index 46c8834..fcab43d 100644
27543 --- a/arch/x86/mm/pgtable_32.c
27544 +++ b/arch/x86/mm/pgtable_32.c
27545 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
27546 return;
27547 }
27548 pte = pte_offset_kernel(pmd, vaddr);
27549 +
27550 + pax_open_kernel();
27551 if (pte_val(pteval))
27552 set_pte_at(&init_mm, vaddr, pte, pteval);
27553 else
27554 pte_clear(&init_mm, vaddr, pte);
27555 + pax_close_kernel();
27556
27557 /*
27558 * It's enough to flush this one mapping.
27559 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
27560 index 513d8ed..978c161 100644
27561 --- a/arch/x86/mm/setup_nx.c
27562 +++ b/arch/x86/mm/setup_nx.c
27563 @@ -4,11 +4,10 @@
27564
27565 #include <asm/pgtable.h>
27566
27567 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
27568 int nx_enabled;
27569
27570 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27571 -static int disable_nx __cpuinitdata;
27572 -
27573 +#ifndef CONFIG_PAX_PAGEEXEC
27574 /*
27575 * noexec = on|off
27576 *
27577 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
27578 if (!str)
27579 return -EINVAL;
27580 if (!strncmp(str, "on", 2)) {
27581 - __supported_pte_mask |= _PAGE_NX;
27582 - disable_nx = 0;
27583 + nx_enabled = 1;
27584 } else if (!strncmp(str, "off", 3)) {
27585 - disable_nx = 1;
27586 - __supported_pte_mask &= ~_PAGE_NX;
27587 + nx_enabled = 0;
27588 }
27589 return 0;
27590 }
27591 early_param("noexec", noexec_setup);
27592 #endif
27593 +#endif
27594
27595 #ifdef CONFIG_X86_PAE
27596 void __init set_nx(void)
27597 {
27598 - unsigned int v[4], l, h;
27599 + if (!nx_enabled && cpu_has_nx) {
27600 + unsigned l, h;
27601
27602 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
27603 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
27604 -
27605 - if ((v[3] & (1 << 20)) && !disable_nx) {
27606 - rdmsr(MSR_EFER, l, h);
27607 - l |= EFER_NX;
27608 - wrmsr(MSR_EFER, l, h);
27609 - nx_enabled = 1;
27610 - __supported_pte_mask |= _PAGE_NX;
27611 - }
27612 + __supported_pte_mask &= ~_PAGE_NX;
27613 + rdmsr(MSR_EFER, l, h);
27614 + l &= ~EFER_NX;
27615 + wrmsr(MSR_EFER, l, h);
27616 }
27617 }
27618 #else
27619 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
27620 unsigned long efer;
27621
27622 rdmsrl(MSR_EFER, efer);
27623 - if (!(efer & EFER_NX) || disable_nx)
27624 + if (!(efer & EFER_NX) || !nx_enabled)
27625 __supported_pte_mask &= ~_PAGE_NX;
27626 }
27627 #endif
27628 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
27629 index 36fe08e..b123d3a 100644
27630 --- a/arch/x86/mm/tlb.c
27631 +++ b/arch/x86/mm/tlb.c
27632 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
27633 BUG();
27634 cpumask_clear_cpu(cpu,
27635 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
27636 +
27637 +#ifndef CONFIG_PAX_PER_CPU_PGD
27638 load_cr3(swapper_pg_dir);
27639 +#endif
27640 +
27641 }
27642 EXPORT_SYMBOL_GPL(leave_mm);
27643
27644 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
27645 index 829edf0..672adb3 100644
27646 --- a/arch/x86/oprofile/backtrace.c
27647 +++ b/arch/x86/oprofile/backtrace.c
27648 @@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
27649 {
27650 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
27651
27652 - if (!user_mode_vm(regs)) {
27653 + if (!user_mode(regs)) {
27654 unsigned long stack = kernel_stack_pointer(regs);
27655 if (depth)
27656 dump_trace(NULL, regs, (unsigned long *)stack, 0,
27657 diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
27658 index e6a160a..36deff6 100644
27659 --- a/arch/x86/oprofile/op_model_p4.c
27660 +++ b/arch/x86/oprofile/op_model_p4.c
27661 @@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
27662 #endif
27663 }
27664
27665 -static int inline addr_increment(void)
27666 +static inline int addr_increment(void)
27667 {
27668 #ifdef CONFIG_SMP
27669 return smp_num_siblings == 2 ? 2 : 1;
27670 diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
27671 index 1331fcf..03901b2 100644
27672 --- a/arch/x86/pci/common.c
27673 +++ b/arch/x86/pci/common.c
27674 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
27675 int pcibios_last_bus = -1;
27676 unsigned long pirq_table_addr;
27677 struct pci_bus *pci_root_bus;
27678 -struct pci_raw_ops *raw_pci_ops;
27679 -struct pci_raw_ops *raw_pci_ext_ops;
27680 +const struct pci_raw_ops *raw_pci_ops;
27681 +const struct pci_raw_ops *raw_pci_ext_ops;
27682
27683 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
27684 int reg, int len, u32 *val)
27685 diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
27686 index 347d882..4baf6b6 100644
27687 --- a/arch/x86/pci/direct.c
27688 +++ b/arch/x86/pci/direct.c
27689 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
27690
27691 #undef PCI_CONF1_ADDRESS
27692
27693 -struct pci_raw_ops pci_direct_conf1 = {
27694 +const struct pci_raw_ops pci_direct_conf1 = {
27695 .read = pci_conf1_read,
27696 .write = pci_conf1_write,
27697 };
27698 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
27699
27700 #undef PCI_CONF2_ADDRESS
27701
27702 -struct pci_raw_ops pci_direct_conf2 = {
27703 +const struct pci_raw_ops pci_direct_conf2 = {
27704 .read = pci_conf2_read,
27705 .write = pci_conf2_write,
27706 };
27707 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
27708 * This should be close to trivial, but it isn't, because there are buggy
27709 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
27710 */
27711 -static int __init pci_sanity_check(struct pci_raw_ops *o)
27712 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
27713 {
27714 u32 x = 0;
27715 int year, devfn;
27716 diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
27717 index f10a7e9..0425342 100644
27718 --- a/arch/x86/pci/mmconfig_32.c
27719 +++ b/arch/x86/pci/mmconfig_32.c
27720 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
27721 return 0;
27722 }
27723
27724 -static struct pci_raw_ops pci_mmcfg = {
27725 +static const struct pci_raw_ops pci_mmcfg = {
27726 .read = pci_mmcfg_read,
27727 .write = pci_mmcfg_write,
27728 };
27729 diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
27730 index 94349f8..41600a7 100644
27731 --- a/arch/x86/pci/mmconfig_64.c
27732 +++ b/arch/x86/pci/mmconfig_64.c
27733 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
27734 return 0;
27735 }
27736
27737 -static struct pci_raw_ops pci_mmcfg = {
27738 +static const struct pci_raw_ops pci_mmcfg = {
27739 .read = pci_mmcfg_read,
27740 .write = pci_mmcfg_write,
27741 };
27742 diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
27743 index 8eb295e..86bd657 100644
27744 --- a/arch/x86/pci/numaq_32.c
27745 +++ b/arch/x86/pci/numaq_32.c
27746 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
27747
27748 #undef PCI_CONF1_MQ_ADDRESS
27749
27750 -static struct pci_raw_ops pci_direct_conf1_mq = {
27751 +static const struct pci_raw_ops pci_direct_conf1_mq = {
27752 .read = pci_conf1_mq_read,
27753 .write = pci_conf1_mq_write
27754 };
27755 diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
27756 index b889d82..5a58a0a 100644
27757 --- a/arch/x86/pci/olpc.c
27758 +++ b/arch/x86/pci/olpc.c
27759 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
27760 return 0;
27761 }
27762
27763 -static struct pci_raw_ops pci_olpc_conf = {
27764 +static const struct pci_raw_ops pci_olpc_conf = {
27765 .read = pci_olpc_read,
27766 .write = pci_olpc_write,
27767 };
27768 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
27769 index 1c975cc..b8e16c2 100644
27770 --- a/arch/x86/pci/pcbios.c
27771 +++ b/arch/x86/pci/pcbios.c
27772 @@ -56,50 +56,93 @@ union bios32 {
27773 static struct {
27774 unsigned long address;
27775 unsigned short segment;
27776 -} bios32_indirect = { 0, __KERNEL_CS };
27777 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
27778
27779 /*
27780 * Returns the entry point for the given service, NULL on error
27781 */
27782
27783 -static unsigned long bios32_service(unsigned long service)
27784 +static unsigned long __devinit bios32_service(unsigned long service)
27785 {
27786 unsigned char return_code; /* %al */
27787 unsigned long address; /* %ebx */
27788 unsigned long length; /* %ecx */
27789 unsigned long entry; /* %edx */
27790 unsigned long flags;
27791 + struct desc_struct d, *gdt;
27792
27793 local_irq_save(flags);
27794 - __asm__("lcall *(%%edi); cld"
27795 +
27796 + gdt = get_cpu_gdt_table(smp_processor_id());
27797 +
27798 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
27799 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27800 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
27801 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27802 +
27803 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
27804 : "=a" (return_code),
27805 "=b" (address),
27806 "=c" (length),
27807 "=d" (entry)
27808 : "0" (service),
27809 "1" (0),
27810 - "D" (&bios32_indirect));
27811 + "D" (&bios32_indirect),
27812 + "r"(__PCIBIOS_DS)
27813 + : "memory");
27814 +
27815 + pax_open_kernel();
27816 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
27817 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
27818 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
27819 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
27820 + pax_close_kernel();
27821 +
27822 local_irq_restore(flags);
27823
27824 switch (return_code) {
27825 - case 0:
27826 - return address + entry;
27827 - case 0x80: /* Not present */
27828 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27829 - return 0;
27830 - default: /* Shouldn't happen */
27831 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27832 - service, return_code);
27833 + case 0: {
27834 + int cpu;
27835 + unsigned char flags;
27836 +
27837 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
27838 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
27839 + printk(KERN_WARNING "bios32_service: not valid\n");
27840 return 0;
27841 + }
27842 + address = address + PAGE_OFFSET;
27843 + length += 16UL; /* some BIOSs underreport this... */
27844 + flags = 4;
27845 + if (length >= 64*1024*1024) {
27846 + length >>= PAGE_SHIFT;
27847 + flags |= 8;
27848 + }
27849 +
27850 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
27851 + gdt = get_cpu_gdt_table(cpu);
27852 + pack_descriptor(&d, address, length, 0x9b, flags);
27853 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27854 + pack_descriptor(&d, address, length, 0x93, flags);
27855 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27856 + }
27857 + return entry;
27858 + }
27859 + case 0x80: /* Not present */
27860 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27861 + return 0;
27862 + default: /* Shouldn't happen */
27863 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27864 + service, return_code);
27865 + return 0;
27866 }
27867 }
27868
27869 static struct {
27870 unsigned long address;
27871 unsigned short segment;
27872 -} pci_indirect = { 0, __KERNEL_CS };
27873 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
27874
27875 -static int pci_bios_present;
27876 +static int pci_bios_present __read_only;
27877
27878 static int __devinit check_pcibios(void)
27879 {
27880 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
27881 unsigned long flags, pcibios_entry;
27882
27883 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
27884 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
27885 + pci_indirect.address = pcibios_entry;
27886
27887 local_irq_save(flags);
27888 - __asm__(
27889 - "lcall *(%%edi); cld\n\t"
27890 + __asm__("movw %w6, %%ds\n\t"
27891 + "lcall *%%ss:(%%edi); cld\n\t"
27892 + "push %%ss\n\t"
27893 + "pop %%ds\n\t"
27894 "jc 1f\n\t"
27895 "xor %%ah, %%ah\n"
27896 "1:"
27897 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
27898 "=b" (ebx),
27899 "=c" (ecx)
27900 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
27901 - "D" (&pci_indirect)
27902 + "D" (&pci_indirect),
27903 + "r" (__PCIBIOS_DS)
27904 : "memory");
27905 local_irq_restore(flags);
27906
27907 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27908
27909 switch (len) {
27910 case 1:
27911 - __asm__("lcall *(%%esi); cld\n\t"
27912 + __asm__("movw %w6, %%ds\n\t"
27913 + "lcall *%%ss:(%%esi); cld\n\t"
27914 + "push %%ss\n\t"
27915 + "pop %%ds\n\t"
27916 "jc 1f\n\t"
27917 "xor %%ah, %%ah\n"
27918 "1:"
27919 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27920 : "1" (PCIBIOS_READ_CONFIG_BYTE),
27921 "b" (bx),
27922 "D" ((long)reg),
27923 - "S" (&pci_indirect));
27924 + "S" (&pci_indirect),
27925 + "r" (__PCIBIOS_DS));
27926 /*
27927 * Zero-extend the result beyond 8 bits, do not trust the
27928 * BIOS having done it:
27929 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27930 *value &= 0xff;
27931 break;
27932 case 2:
27933 - __asm__("lcall *(%%esi); cld\n\t"
27934 + __asm__("movw %w6, %%ds\n\t"
27935 + "lcall *%%ss:(%%esi); cld\n\t"
27936 + "push %%ss\n\t"
27937 + "pop %%ds\n\t"
27938 "jc 1f\n\t"
27939 "xor %%ah, %%ah\n"
27940 "1:"
27941 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27942 : "1" (PCIBIOS_READ_CONFIG_WORD),
27943 "b" (bx),
27944 "D" ((long)reg),
27945 - "S" (&pci_indirect));
27946 + "S" (&pci_indirect),
27947 + "r" (__PCIBIOS_DS));
27948 /*
27949 * Zero-extend the result beyond 16 bits, do not trust the
27950 * BIOS having done it:
27951 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27952 *value &= 0xffff;
27953 break;
27954 case 4:
27955 - __asm__("lcall *(%%esi); cld\n\t"
27956 + __asm__("movw %w6, %%ds\n\t"
27957 + "lcall *%%ss:(%%esi); cld\n\t"
27958 + "push %%ss\n\t"
27959 + "pop %%ds\n\t"
27960 "jc 1f\n\t"
27961 "xor %%ah, %%ah\n"
27962 "1:"
27963 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27964 : "1" (PCIBIOS_READ_CONFIG_DWORD),
27965 "b" (bx),
27966 "D" ((long)reg),
27967 - "S" (&pci_indirect));
27968 + "S" (&pci_indirect),
27969 + "r" (__PCIBIOS_DS));
27970 break;
27971 }
27972
27973 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27974
27975 switch (len) {
27976 case 1:
27977 - __asm__("lcall *(%%esi); cld\n\t"
27978 + __asm__("movw %w6, %%ds\n\t"
27979 + "lcall *%%ss:(%%esi); cld\n\t"
27980 + "push %%ss\n\t"
27981 + "pop %%ds\n\t"
27982 "jc 1f\n\t"
27983 "xor %%ah, %%ah\n"
27984 "1:"
27985 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27986 "c" (value),
27987 "b" (bx),
27988 "D" ((long)reg),
27989 - "S" (&pci_indirect));
27990 + "S" (&pci_indirect),
27991 + "r" (__PCIBIOS_DS));
27992 break;
27993 case 2:
27994 - __asm__("lcall *(%%esi); cld\n\t"
27995 + __asm__("movw %w6, %%ds\n\t"
27996 + "lcall *%%ss:(%%esi); cld\n\t"
27997 + "push %%ss\n\t"
27998 + "pop %%ds\n\t"
27999 "jc 1f\n\t"
28000 "xor %%ah, %%ah\n"
28001 "1:"
28002 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28003 "c" (value),
28004 "b" (bx),
28005 "D" ((long)reg),
28006 - "S" (&pci_indirect));
28007 + "S" (&pci_indirect),
28008 + "r" (__PCIBIOS_DS));
28009 break;
28010 case 4:
28011 - __asm__("lcall *(%%esi); cld\n\t"
28012 + __asm__("movw %w6, %%ds\n\t"
28013 + "lcall *%%ss:(%%esi); cld\n\t"
28014 + "push %%ss\n\t"
28015 + "pop %%ds\n\t"
28016 "jc 1f\n\t"
28017 "xor %%ah, %%ah\n"
28018 "1:"
28019 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28020 "c" (value),
28021 "b" (bx),
28022 "D" ((long)reg),
28023 - "S" (&pci_indirect));
28024 + "S" (&pci_indirect),
28025 + "r" (__PCIBIOS_DS));
28026 break;
28027 }
28028
28029 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28030 * Function table for BIOS32 access
28031 */
28032
28033 -static struct pci_raw_ops pci_bios_access = {
28034 +static const struct pci_raw_ops pci_bios_access = {
28035 .read = pci_bios_read,
28036 .write = pci_bios_write
28037 };
28038 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
28039 * Try to find PCI BIOS.
28040 */
28041
28042 -static struct pci_raw_ops * __devinit pci_find_bios(void)
28043 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
28044 {
28045 union bios32 *check;
28046 unsigned char sum;
28047 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
28048
28049 DBG("PCI: Fetching IRQ routing table... ");
28050 __asm__("push %%es\n\t"
28051 + "movw %w8, %%ds\n\t"
28052 "push %%ds\n\t"
28053 "pop %%es\n\t"
28054 - "lcall *(%%esi); cld\n\t"
28055 + "lcall *%%ss:(%%esi); cld\n\t"
28056 "pop %%es\n\t"
28057 + "push %%ss\n\t"
28058 + "pop %%ds\n"
28059 "jc 1f\n\t"
28060 "xor %%ah, %%ah\n"
28061 "1:"
28062 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
28063 "1" (0),
28064 "D" ((long) &opt),
28065 "S" (&pci_indirect),
28066 - "m" (opt)
28067 + "m" (opt),
28068 + "r" (__PCIBIOS_DS)
28069 : "memory");
28070 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
28071 if (ret & 0xff00)
28072 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
28073 {
28074 int ret;
28075
28076 - __asm__("lcall *(%%esi); cld\n\t"
28077 + __asm__("movw %w5, %%ds\n\t"
28078 + "lcall *%%ss:(%%esi); cld\n\t"
28079 + "push %%ss\n\t"
28080 + "pop %%ds\n"
28081 "jc 1f\n\t"
28082 "xor %%ah, %%ah\n"
28083 "1:"
28084 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
28085 : "0" (PCIBIOS_SET_PCI_HW_INT),
28086 "b" ((dev->bus->number << 8) | dev->devfn),
28087 "c" ((irq << 8) | (pin + 10)),
28088 - "S" (&pci_indirect));
28089 + "S" (&pci_indirect),
28090 + "r" (__PCIBIOS_DS));
28091 return !(ret & 0xff00);
28092 }
28093 EXPORT_SYMBOL(pcibios_set_irq_routing);
28094 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
28095 index fa0f651..9d8f3d9 100644
28096 --- a/arch/x86/power/cpu.c
28097 +++ b/arch/x86/power/cpu.c
28098 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
28099 static void fix_processor_context(void)
28100 {
28101 int cpu = smp_processor_id();
28102 - struct tss_struct *t = &per_cpu(init_tss, cpu);
28103 + struct tss_struct *t = init_tss + cpu;
28104
28105 set_tss_desc(cpu, t); /*
28106 * This just modifies memory; should not be
28107 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
28108 */
28109
28110 #ifdef CONFIG_X86_64
28111 + pax_open_kernel();
28112 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
28113 + pax_close_kernel();
28114
28115 syscall_init(); /* This sets MSR_*STAR and related */
28116 #endif
28117 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
28118 index dd78ef6..f9d928d 100644
28119 --- a/arch/x86/vdso/Makefile
28120 +++ b/arch/x86/vdso/Makefile
28121 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
28122 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
28123 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
28124
28125 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
28126 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
28127 GCOV_PROFILE := n
28128
28129 #
28130 diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
28131 index ee55754..0013b2e 100644
28132 --- a/arch/x86/vdso/vclock_gettime.c
28133 +++ b/arch/x86/vdso/vclock_gettime.c
28134 @@ -22,24 +22,48 @@
28135 #include <asm/hpet.h>
28136 #include <asm/unistd.h>
28137 #include <asm/io.h>
28138 +#include <asm/fixmap.h>
28139 #include "vextern.h"
28140
28141 #define gtod vdso_vsyscall_gtod_data
28142
28143 +notrace noinline long __vdso_fallback_time(long *t)
28144 +{
28145 + long secs;
28146 + asm volatile("syscall"
28147 + : "=a" (secs)
28148 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
28149 + return secs;
28150 +}
28151 +
28152 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
28153 {
28154 long ret;
28155 asm("syscall" : "=a" (ret) :
28156 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
28157 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
28158 return ret;
28159 }
28160
28161 +notrace static inline cycle_t __vdso_vread_hpet(void)
28162 +{
28163 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
28164 +}
28165 +
28166 +notrace static inline cycle_t __vdso_vread_tsc(void)
28167 +{
28168 + cycle_t ret = (cycle_t)vget_cycles();
28169 +
28170 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
28171 +}
28172 +
28173 notrace static inline long vgetns(void)
28174 {
28175 long v;
28176 - cycles_t (*vread)(void);
28177 - vread = gtod->clock.vread;
28178 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
28179 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
28180 + v = __vdso_vread_tsc();
28181 + else
28182 + v = __vdso_vread_hpet();
28183 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
28184 return (v * gtod->clock.mult) >> gtod->clock.shift;
28185 }
28186
28187 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
28188
28189 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
28190 {
28191 - if (likely(gtod->sysctl_enabled))
28192 + if (likely(gtod->sysctl_enabled &&
28193 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
28194 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
28195 switch (clock) {
28196 case CLOCK_REALTIME:
28197 if (likely(gtod->clock.vread))
28198 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
28199 int clock_gettime(clockid_t, struct timespec *)
28200 __attribute__((weak, alias("__vdso_clock_gettime")));
28201
28202 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
28203 +{
28204 + long ret;
28205 + asm("syscall" : "=a" (ret) :
28206 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
28207 + return ret;
28208 +}
28209 +
28210 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
28211 {
28212 - long ret;
28213 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
28214 + if (likely(gtod->sysctl_enabled &&
28215 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
28216 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
28217 + {
28218 if (likely(tv != NULL)) {
28219 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
28220 offsetof(struct timespec, tv_nsec) ||
28221 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
28222 }
28223 return 0;
28224 }
28225 - asm("syscall" : "=a" (ret) :
28226 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
28227 - return ret;
28228 + return __vdso_fallback_gettimeofday(tv, tz);
28229 }
28230 int gettimeofday(struct timeval *, struct timezone *)
28231 __attribute__((weak, alias("__vdso_gettimeofday")));
28232 diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
28233 index 4e5dd3b..00ba15e 100644
28234 --- a/arch/x86/vdso/vdso.lds.S
28235 +++ b/arch/x86/vdso/vdso.lds.S
28236 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
28237 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
28238 #include "vextern.h"
28239 #undef VEXTERN
28240 +
28241 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
28242 +VEXTERN(fallback_gettimeofday)
28243 +VEXTERN(fallback_time)
28244 +VEXTERN(getcpu)
28245 +#undef VEXTERN
28246 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
28247 index 58bc00f..d53fb48 100644
28248 --- a/arch/x86/vdso/vdso32-setup.c
28249 +++ b/arch/x86/vdso/vdso32-setup.c
28250 @@ -25,6 +25,7 @@
28251 #include <asm/tlbflush.h>
28252 #include <asm/vdso.h>
28253 #include <asm/proto.h>
28254 +#include <asm/mman.h>
28255
28256 enum {
28257 VDSO_DISABLED = 0,
28258 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
28259 void enable_sep_cpu(void)
28260 {
28261 int cpu = get_cpu();
28262 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
28263 + struct tss_struct *tss = init_tss + cpu;
28264
28265 if (!boot_cpu_has(X86_FEATURE_SEP)) {
28266 put_cpu();
28267 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
28268 gate_vma.vm_start = FIXADDR_USER_START;
28269 gate_vma.vm_end = FIXADDR_USER_END;
28270 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
28271 - gate_vma.vm_page_prot = __P101;
28272 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
28273 /*
28274 * Make sure the vDSO gets into every core dump.
28275 * Dumping its contents makes post-mortem fully interpretable later
28276 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28277 if (compat)
28278 addr = VDSO_HIGH_BASE;
28279 else {
28280 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
28281 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
28282 if (IS_ERR_VALUE(addr)) {
28283 ret = addr;
28284 goto up_fail;
28285 }
28286 }
28287
28288 - current->mm->context.vdso = (void *)addr;
28289 + current->mm->context.vdso = addr;
28290
28291 if (compat_uses_vma || !compat) {
28292 /*
28293 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28294 }
28295
28296 current_thread_info()->sysenter_return =
28297 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
28298 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
28299
28300 up_fail:
28301 if (ret)
28302 - current->mm->context.vdso = NULL;
28303 + current->mm->context.vdso = 0;
28304
28305 up_write(&mm->mmap_sem);
28306
28307 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
28308
28309 const char *arch_vma_name(struct vm_area_struct *vma)
28310 {
28311 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28312 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28313 return "[vdso]";
28314 +
28315 +#ifdef CONFIG_PAX_SEGMEXEC
28316 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
28317 + return "[vdso]";
28318 +#endif
28319 +
28320 return NULL;
28321 }
28322
28323 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
28324 struct mm_struct *mm = tsk->mm;
28325
28326 /* Check to see if this task was created in compat vdso mode */
28327 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
28328 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
28329 return &gate_vma;
28330 return NULL;
28331 }
28332 diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
28333 index 1683ba2..48d07f3 100644
28334 --- a/arch/x86/vdso/vextern.h
28335 +++ b/arch/x86/vdso/vextern.h
28336 @@ -11,6 +11,5 @@
28337 put into vextern.h and be referenced as a pointer with vdso prefix.
28338 The main kernel later fills in the values. */
28339
28340 -VEXTERN(jiffies)
28341 VEXTERN(vgetcpu_mode)
28342 VEXTERN(vsyscall_gtod_data)
28343 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
28344 index 21e1aeb..2c0b3c4 100644
28345 --- a/arch/x86/vdso/vma.c
28346 +++ b/arch/x86/vdso/vma.c
28347 @@ -17,8 +17,6 @@
28348 #include "vextern.h" /* Just for VMAGIC. */
28349 #undef VEXTERN
28350
28351 -unsigned int __read_mostly vdso_enabled = 1;
28352 -
28353 extern char vdso_start[], vdso_end[];
28354 extern unsigned short vdso_sync_cpuid;
28355
28356 @@ -27,10 +25,8 @@ static unsigned vdso_size;
28357
28358 static inline void *var_ref(void *p, char *name)
28359 {
28360 - if (*(void **)p != (void *)VMAGIC) {
28361 - printk("VDSO: variable %s broken\n", name);
28362 - vdso_enabled = 0;
28363 - }
28364 + if (*(void **)p != (void *)VMAGIC)
28365 + panic("VDSO: variable %s broken\n", name);
28366 return p;
28367 }
28368
28369 @@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
28370 if (!vbase)
28371 goto oom;
28372
28373 - if (memcmp(vbase, "\177ELF", 4)) {
28374 - printk("VDSO: I'm broken; not ELF\n");
28375 - vdso_enabled = 0;
28376 - }
28377 + if (memcmp(vbase, ELFMAG, SELFMAG))
28378 + panic("VDSO: I'm broken; not ELF\n");
28379
28380 #define VEXTERN(x) \
28381 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
28382 #include "vextern.h"
28383 #undef VEXTERN
28384 + vunmap(vbase);
28385 return 0;
28386
28387 oom:
28388 - printk("Cannot allocate vdso\n");
28389 - vdso_enabled = 0;
28390 - return -ENOMEM;
28391 + panic("Cannot allocate vdso\n");
28392 }
28393 __initcall(init_vdso_vars);
28394
28395 @@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
28396 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28397 {
28398 struct mm_struct *mm = current->mm;
28399 - unsigned long addr;
28400 + unsigned long addr = 0;
28401 int ret;
28402
28403 - if (!vdso_enabled)
28404 - return 0;
28405 -
28406 down_write(&mm->mmap_sem);
28407 +
28408 +#ifdef CONFIG_PAX_RANDMMAP
28409 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28410 +#endif
28411 +
28412 addr = vdso_addr(mm->start_stack, vdso_size);
28413 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
28414 if (IS_ERR_VALUE(addr)) {
28415 @@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28416 goto up_fail;
28417 }
28418
28419 - current->mm->context.vdso = (void *)addr;
28420 + current->mm->context.vdso = addr;
28421
28422 ret = install_special_mapping(mm, addr, vdso_size,
28423 VM_READ|VM_EXEC|
28424 @@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28425 VM_ALWAYSDUMP,
28426 vdso_pages);
28427 if (ret) {
28428 - current->mm->context.vdso = NULL;
28429 + current->mm->context.vdso = 0;
28430 goto up_fail;
28431 }
28432
28433 @@ -132,10 +127,3 @@ up_fail:
28434 up_write(&mm->mmap_sem);
28435 return ret;
28436 }
28437 -
28438 -static __init int vdso_setup(char *s)
28439 -{
28440 - vdso_enabled = simple_strtoul(s, NULL, 0);
28441 - return 0;
28442 -}
28443 -__setup("vdso=", vdso_setup);
28444 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
28445 index 0087b00..eecb34f 100644
28446 --- a/arch/x86/xen/enlighten.c
28447 +++ b/arch/x86/xen/enlighten.c
28448 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
28449
28450 struct shared_info xen_dummy_shared_info;
28451
28452 -void *xen_initial_gdt;
28453 -
28454 /*
28455 * Point at some empty memory to start with. We map the real shared_info
28456 * page as soon as fixmap is up and running.
28457 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
28458
28459 preempt_disable();
28460
28461 - start = __get_cpu_var(idt_desc).address;
28462 + start = (unsigned long)__get_cpu_var(idt_desc).address;
28463 end = start + __get_cpu_var(idt_desc).size + 1;
28464
28465 xen_mc_flush();
28466 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
28467 #endif
28468 };
28469
28470 -static void xen_reboot(int reason)
28471 +static __noreturn void xen_reboot(int reason)
28472 {
28473 struct sched_shutdown r = { .reason = reason };
28474
28475 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
28476 BUG();
28477 }
28478
28479 -static void xen_restart(char *msg)
28480 +static __noreturn void xen_restart(char *msg)
28481 {
28482 xen_reboot(SHUTDOWN_reboot);
28483 }
28484
28485 -static void xen_emergency_restart(void)
28486 +static __noreturn void xen_emergency_restart(void)
28487 {
28488 xen_reboot(SHUTDOWN_reboot);
28489 }
28490
28491 -static void xen_machine_halt(void)
28492 +static __noreturn void xen_machine_halt(void)
28493 {
28494 xen_reboot(SHUTDOWN_poweroff);
28495 }
28496 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
28497 */
28498 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
28499
28500 -#ifdef CONFIG_X86_64
28501 /* Work out if we support NX */
28502 - check_efer();
28503 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
28504 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
28505 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
28506 + unsigned l, h;
28507 +
28508 +#ifdef CONFIG_X86_PAE
28509 + nx_enabled = 1;
28510 +#endif
28511 + __supported_pte_mask |= _PAGE_NX;
28512 + rdmsr(MSR_EFER, l, h);
28513 + l |= EFER_NX;
28514 + wrmsr(MSR_EFER, l, h);
28515 + }
28516 #endif
28517
28518 xen_setup_features();
28519 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
28520
28521 machine_ops = xen_machine_ops;
28522
28523 - /*
28524 - * The only reliable way to retain the initial address of the
28525 - * percpu gdt_page is to remember it here, so we can go and
28526 - * mark it RW later, when the initial percpu area is freed.
28527 - */
28528 - xen_initial_gdt = &per_cpu(gdt_page, 0);
28529 -
28530 xen_smp_init();
28531
28532 pgd = (pgd_t *)xen_start_info->pt_base;
28533 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
28534 index 3f90a2c..2c2ad84 100644
28535 --- a/arch/x86/xen/mmu.c
28536 +++ b/arch/x86/xen/mmu.c
28537 @@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
28538 convert_pfn_mfn(init_level4_pgt);
28539 convert_pfn_mfn(level3_ident_pgt);
28540 convert_pfn_mfn(level3_kernel_pgt);
28541 + convert_pfn_mfn(level3_vmalloc_start_pgt);
28542 + convert_pfn_mfn(level3_vmalloc_end_pgt);
28543 + convert_pfn_mfn(level3_vmemmap_pgt);
28544
28545 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
28546 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
28547 @@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
28548 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
28549 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
28550 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
28551 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
28552 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
28553 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
28554 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
28555 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
28556 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
28557 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
28558
28559 @@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
28560 pv_mmu_ops.set_pud = xen_set_pud;
28561 #if PAGETABLE_LEVELS == 4
28562 pv_mmu_ops.set_pgd = xen_set_pgd;
28563 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
28564 #endif
28565
28566 /* This will work as long as patching hasn't happened yet
28567 @@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
28568 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
28569 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
28570 .set_pgd = xen_set_pgd_hyper,
28571 + .set_pgd_batched = xen_set_pgd_hyper,
28572
28573 .alloc_pud = xen_alloc_pmd_init,
28574 .release_pud = xen_release_pmd_init,
28575 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
28576 index a96204a..fca9b8e 100644
28577 --- a/arch/x86/xen/smp.c
28578 +++ b/arch/x86/xen/smp.c
28579 @@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
28580 {
28581 BUG_ON(smp_processor_id() != 0);
28582 native_smp_prepare_boot_cpu();
28583 -
28584 - /* We've switched to the "real" per-cpu gdt, so make sure the
28585 - old memory can be recycled */
28586 - make_lowmem_page_readwrite(xen_initial_gdt);
28587 -
28588 xen_setup_vcpu_info_placement();
28589 }
28590
28591 @@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
28592 gdt = get_cpu_gdt_table(cpu);
28593
28594 ctxt->flags = VGCF_IN_KERNEL;
28595 - ctxt->user_regs.ds = __USER_DS;
28596 - ctxt->user_regs.es = __USER_DS;
28597 + ctxt->user_regs.ds = __KERNEL_DS;
28598 + ctxt->user_regs.es = __KERNEL_DS;
28599 ctxt->user_regs.ss = __KERNEL_DS;
28600 #ifdef CONFIG_X86_32
28601 ctxt->user_regs.fs = __KERNEL_PERCPU;
28602 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
28603 + savesegment(gs, ctxt->user_regs.gs);
28604 #else
28605 ctxt->gs_base_kernel = per_cpu_offset(cpu);
28606 #endif
28607 @@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
28608 int rc;
28609
28610 per_cpu(current_task, cpu) = idle;
28611 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
28612 #ifdef CONFIG_X86_32
28613 irq_ctx_init(cpu);
28614 #else
28615 clear_tsk_thread_flag(idle, TIF_FORK);
28616 - per_cpu(kernel_stack, cpu) =
28617 - (unsigned long)task_stack_page(idle) -
28618 - KERNEL_STACK_OFFSET + THREAD_SIZE;
28619 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
28620 #endif
28621 xen_setup_runstate_info(cpu);
28622 xen_setup_timer(cpu);
28623 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
28624 index 9a95a9c..4f39e774 100644
28625 --- a/arch/x86/xen/xen-asm_32.S
28626 +++ b/arch/x86/xen/xen-asm_32.S
28627 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
28628 ESP_OFFSET=4 # bytes pushed onto stack
28629
28630 /*
28631 - * Store vcpu_info pointer for easy access. Do it this way to
28632 - * avoid having to reload %fs
28633 + * Store vcpu_info pointer for easy access.
28634 */
28635 #ifdef CONFIG_SMP
28636 - GET_THREAD_INFO(%eax)
28637 - movl TI_cpu(%eax), %eax
28638 - movl __per_cpu_offset(,%eax,4), %eax
28639 - mov per_cpu__xen_vcpu(%eax), %eax
28640 + push %fs
28641 + mov $(__KERNEL_PERCPU), %eax
28642 + mov %eax, %fs
28643 + mov PER_CPU_VAR(xen_vcpu), %eax
28644 + pop %fs
28645 #else
28646 movl per_cpu__xen_vcpu, %eax
28647 #endif
28648 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
28649 index 1a5ff24..a187d40 100644
28650 --- a/arch/x86/xen/xen-head.S
28651 +++ b/arch/x86/xen/xen-head.S
28652 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
28653 #ifdef CONFIG_X86_32
28654 mov %esi,xen_start_info
28655 mov $init_thread_union+THREAD_SIZE,%esp
28656 +#ifdef CONFIG_SMP
28657 + movl $cpu_gdt_table,%edi
28658 + movl $__per_cpu_load,%eax
28659 + movw %ax,__KERNEL_PERCPU + 2(%edi)
28660 + rorl $16,%eax
28661 + movb %al,__KERNEL_PERCPU + 4(%edi)
28662 + movb %ah,__KERNEL_PERCPU + 7(%edi)
28663 + movl $__per_cpu_end - 1,%eax
28664 + subl $__per_cpu_start,%eax
28665 + movw %ax,__KERNEL_PERCPU + 0(%edi)
28666 +#endif
28667 #else
28668 mov %rsi,xen_start_info
28669 mov $init_thread_union+THREAD_SIZE,%rsp
28670 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
28671 index f9153a3..51eab3d 100644
28672 --- a/arch/x86/xen/xen-ops.h
28673 +++ b/arch/x86/xen/xen-ops.h
28674 @@ -10,8 +10,6 @@
28675 extern const char xen_hypervisor_callback[];
28676 extern const char xen_failsafe_callback[];
28677
28678 -extern void *xen_initial_gdt;
28679 -
28680 struct trap_info;
28681 void xen_copy_trap_info(struct trap_info *traps);
28682
28683 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
28684 index 525bd3d..ef888b1 100644
28685 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
28686 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
28687 @@ -119,9 +119,9 @@
28688 ----------------------------------------------------------------------*/
28689
28690 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
28691 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
28692 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
28693 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
28694 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28695
28696 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
28697 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
28698 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
28699 index 2f33760..835e50a 100644
28700 --- a/arch/xtensa/variants/fsf/include/variant/core.h
28701 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
28702 @@ -11,6 +11,7 @@
28703 #ifndef _XTENSA_CORE_H
28704 #define _XTENSA_CORE_H
28705
28706 +#include <linux/const.h>
28707
28708 /****************************************************************************
28709 Parameters Useful for Any Code, USER or PRIVILEGED
28710 @@ -112,9 +113,9 @@
28711 ----------------------------------------------------------------------*/
28712
28713 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
28714 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
28715 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
28716 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
28717 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28718
28719 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
28720 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
28721 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
28722 index af00795..2bb8105 100644
28723 --- a/arch/xtensa/variants/s6000/include/variant/core.h
28724 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
28725 @@ -11,6 +11,7 @@
28726 #ifndef _XTENSA_CORE_CONFIGURATION_H
28727 #define _XTENSA_CORE_CONFIGURATION_H
28728
28729 +#include <linux/const.h>
28730
28731 /****************************************************************************
28732 Parameters Useful for Any Code, USER or PRIVILEGED
28733 @@ -118,9 +119,9 @@
28734 ----------------------------------------------------------------------*/
28735
28736 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
28737 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
28738 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
28739 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
28740 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28741
28742 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
28743 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
28744 diff --git a/block/blk-integrity.c b/block/blk-integrity.c
28745 index 15c6308..96e83c2 100644
28746 --- a/block/blk-integrity.c
28747 +++ b/block/blk-integrity.c
28748 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
28749 NULL,
28750 };
28751
28752 -static struct sysfs_ops integrity_ops = {
28753 +static const struct sysfs_ops integrity_ops = {
28754 .show = &integrity_attr_show,
28755 .store = &integrity_attr_store,
28756 };
28757 diff --git a/block/blk-ioc.c b/block/blk-ioc.c
28758 index d4ed600..cbdabb0 100644
28759 --- a/block/blk-ioc.c
28760 +++ b/block/blk-ioc.c
28761 @@ -66,22 +66,22 @@ static void cfq_exit(struct io_context *ioc)
28762 }
28763
28764 /* Called by the exitting task */
28765 -void exit_io_context(void)
28766 +void exit_io_context(struct task_struct *task)
28767 {
28768 struct io_context *ioc;
28769
28770 - task_lock(current);
28771 - ioc = current->io_context;
28772 - current->io_context = NULL;
28773 - task_unlock(current);
28774 + task_lock(task);
28775 + ioc = task->io_context;
28776 + task->io_context = NULL;
28777 + task_unlock(task);
28778
28779 if (atomic_dec_and_test(&ioc->nr_tasks)) {
28780 if (ioc->aic && ioc->aic->exit)
28781 ioc->aic->exit(ioc->aic);
28782 cfq_exit(ioc);
28783
28784 - put_io_context(ioc);
28785 }
28786 + put_io_context(ioc);
28787 }
28788
28789 struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
28790 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
28791 index ca56420..f2fc409 100644
28792 --- a/block/blk-iopoll.c
28793 +++ b/block/blk-iopoll.c
28794 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
28795 }
28796 EXPORT_SYMBOL(blk_iopoll_complete);
28797
28798 -static void blk_iopoll_softirq(struct softirq_action *h)
28799 +static void blk_iopoll_softirq(void)
28800 {
28801 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
28802 int rearm = 0, budget = blk_iopoll_budget;
28803 diff --git a/block/blk-map.c b/block/blk-map.c
28804 index 30a7e51..0aeec6a 100644
28805 --- a/block/blk-map.c
28806 +++ b/block/blk-map.c
28807 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
28808 * direct dma. else, set up kernel bounce buffers
28809 */
28810 uaddr = (unsigned long) ubuf;
28811 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
28812 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
28813 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
28814 else
28815 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
28816 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
28817 for (i = 0; i < iov_count; i++) {
28818 unsigned long uaddr = (unsigned long)iov[i].iov_base;
28819
28820 + if (!iov[i].iov_len)
28821 + return -EINVAL;
28822 +
28823 if (uaddr & queue_dma_alignment(q)) {
28824 unaligned = 1;
28825 break;
28826 }
28827 - if (!iov[i].iov_len)
28828 - return -EINVAL;
28829 }
28830
28831 if (unaligned || (q->dma_pad_mask & len) || map_data)
28832 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
28833 if (!len || !kbuf)
28834 return -EINVAL;
28835
28836 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
28837 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
28838 if (do_copy)
28839 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
28840 else
28841 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
28842 index ee9c216..58d410a 100644
28843 --- a/block/blk-softirq.c
28844 +++ b/block/blk-softirq.c
28845 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
28846 * Softirq action handler - move entries to local list and loop over them
28847 * while passing them to the queue registered handler.
28848 */
28849 -static void blk_done_softirq(struct softirq_action *h)
28850 +static void blk_done_softirq(void)
28851 {
28852 struct list_head *cpu_list, local_list;
28853
28854 diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
28855 index bb9c5ea..5330d48 100644
28856 --- a/block/blk-sysfs.c
28857 +++ b/block/blk-sysfs.c
28858 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
28859 kmem_cache_free(blk_requestq_cachep, q);
28860 }
28861
28862 -static struct sysfs_ops queue_sysfs_ops = {
28863 +static const struct sysfs_ops queue_sysfs_ops = {
28864 .show = queue_attr_show,
28865 .store = queue_attr_store,
28866 };
28867 diff --git a/block/bsg.c b/block/bsg.c
28868 index e3e3241..759ebf7 100644
28869 --- a/block/bsg.c
28870 +++ b/block/bsg.c
28871 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
28872 struct sg_io_v4 *hdr, struct bsg_device *bd,
28873 fmode_t has_write_perm)
28874 {
28875 + unsigned char tmpcmd[sizeof(rq->__cmd)];
28876 + unsigned char *cmdptr;
28877 +
28878 if (hdr->request_len > BLK_MAX_CDB) {
28879 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
28880 if (!rq->cmd)
28881 return -ENOMEM;
28882 - }
28883 + cmdptr = rq->cmd;
28884 + } else
28885 + cmdptr = tmpcmd;
28886
28887 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
28888 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
28889 hdr->request_len))
28890 return -EFAULT;
28891
28892 + if (cmdptr != rq->cmd)
28893 + memcpy(rq->cmd, cmdptr, hdr->request_len);
28894 +
28895 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
28896 if (blk_verify_command(rq->cmd, has_write_perm))
28897 return -EPERM;
28898 @@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
28899 rq->next_rq = next_rq;
28900 next_rq->cmd_type = rq->cmd_type;
28901
28902 - dxferp = (void*)(unsigned long)hdr->din_xferp;
28903 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
28904 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
28905 hdr->din_xfer_len, GFP_KERNEL);
28906 if (ret)
28907 @@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
28908
28909 if (hdr->dout_xfer_len) {
28910 dxfer_len = hdr->dout_xfer_len;
28911 - dxferp = (void*)(unsigned long)hdr->dout_xferp;
28912 + dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
28913 } else if (hdr->din_xfer_len) {
28914 dxfer_len = hdr->din_xfer_len;
28915 - dxferp = (void*)(unsigned long)hdr->din_xferp;
28916 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
28917 } else
28918 dxfer_len = 0;
28919
28920 @@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
28921 int len = min_t(unsigned int, hdr->max_response_len,
28922 rq->sense_len);
28923
28924 - ret = copy_to_user((void*)(unsigned long)hdr->response,
28925 + ret = copy_to_user((void __user *)(unsigned long)hdr->response,
28926 rq->sense, len);
28927 if (!ret)
28928 hdr->response_len = len;
28929 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
28930 index 9bd086c..ca1fc22 100644
28931 --- a/block/compat_ioctl.c
28932 +++ b/block/compat_ioctl.c
28933 @@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
28934 err |= __get_user(f->spec1, &uf->spec1);
28935 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
28936 err |= __get_user(name, &uf->name);
28937 - f->name = compat_ptr(name);
28938 + f->name = (void __force_kernel *)compat_ptr(name);
28939 if (err) {
28940 err = -EFAULT;
28941 goto out;
28942 diff --git a/block/elevator.c b/block/elevator.c
28943 index a847046..75a1746 100644
28944 --- a/block/elevator.c
28945 +++ b/block/elevator.c
28946 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
28947 return error;
28948 }
28949
28950 -static struct sysfs_ops elv_sysfs_ops = {
28951 +static const struct sysfs_ops elv_sysfs_ops = {
28952 .show = elv_attr_show,
28953 .store = elv_attr_store,
28954 };
28955 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
28956 index 2be0a97..bded3fd 100644
28957 --- a/block/scsi_ioctl.c
28958 +++ b/block/scsi_ioctl.c
28959 @@ -221,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
28960 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
28961 struct sg_io_hdr *hdr, fmode_t mode)
28962 {
28963 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
28964 + unsigned char tmpcmd[sizeof(rq->__cmd)];
28965 + unsigned char *cmdptr;
28966 +
28967 + if (rq->cmd != rq->__cmd)
28968 + cmdptr = rq->cmd;
28969 + else
28970 + cmdptr = tmpcmd;
28971 +
28972 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
28973 return -EFAULT;
28974 +
28975 + if (cmdptr != rq->cmd)
28976 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
28977 +
28978 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
28979 return -EPERM;
28980
28981 @@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
28982 int err;
28983 unsigned int in_len, out_len, bytes, opcode, cmdlen;
28984 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
28985 + unsigned char tmpcmd[sizeof(rq->__cmd)];
28986 + unsigned char *cmdptr;
28987
28988 if (!sic)
28989 return -EINVAL;
28990 @@ -464,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
28991 */
28992 err = -EFAULT;
28993 rq->cmd_len = cmdlen;
28994 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
28995 +
28996 + if (rq->cmd != rq->__cmd)
28997 + cmdptr = rq->cmd;
28998 + else
28999 + cmdptr = tmpcmd;
29000 +
29001 + if (copy_from_user(cmdptr, sic->data, cmdlen))
29002 goto error;
29003
29004 + if (rq->cmd != cmdptr)
29005 + memcpy(rq->cmd, cmdptr, cmdlen);
29006 +
29007 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
29008 goto error;
29009
29010 diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
29011 index f6f0833..514d986 100644
29012 --- a/crypto/ablkcipher.c
29013 +++ b/crypto/ablkcipher.c
29014 @@ -29,6 +29,8 @@
29015 static const char *skcipher_default_geniv __read_mostly;
29016
29017 static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
29018 + unsigned int keylen) __size_overflow(3);
29019 +static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
29020 unsigned int keylen)
29021 {
29022 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
29023 @@ -51,6 +53,8 @@ static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
29024 }
29025
29026 static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
29027 + unsigned int keylen) __size_overflow(3);
29028 +static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
29029 unsigned int keylen)
29030 {
29031 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
29032 diff --git a/crypto/aead.c b/crypto/aead.c
29033 index 0a55da7..9256a04 100644
29034 --- a/crypto/aead.c
29035 +++ b/crypto/aead.c
29036 @@ -25,6 +25,8 @@
29037 #include "internal.h"
29038
29039 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
29040 + unsigned int keylen) __size_overflow(3);
29041 +static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
29042 unsigned int keylen)
29043 {
29044 struct aead_alg *aead = crypto_aead_alg(tfm);
29045 @@ -46,6 +48,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
29046 return ret;
29047 }
29048
29049 +static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
29050 static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
29051 {
29052 struct aead_alg *aead = crypto_aead_alg(tfm);
29053 diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
29054 index 90d26c9..3db7c03 100644
29055 --- a/crypto/blkcipher.c
29056 +++ b/crypto/blkcipher.c
29057 @@ -357,6 +357,8 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
29058 EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
29059
29060 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29061 + unsigned int keylen) __size_overflow(3);
29062 +static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29063 unsigned int keylen)
29064 {
29065 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
29066 @@ -378,6 +380,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29067 return ret;
29068 }
29069
29070 +static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
29071 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
29072 {
29073 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
29074 diff --git a/crypto/cipher.c b/crypto/cipher.c
29075 index 9a1a731..41454c2 100644
29076 --- a/crypto/cipher.c
29077 +++ b/crypto/cipher.c
29078 @@ -21,6 +21,8 @@
29079 #include "internal.h"
29080
29081 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29082 + unsigned int keylen) __size_overflow(3);
29083 +static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29084 unsigned int keylen)
29085 {
29086 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
29087 @@ -43,6 +45,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29088
29089 }
29090
29091 +static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
29092 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
29093 {
29094 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
29095 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
29096 index 3533582..f143117 100644
29097 --- a/crypto/cryptd.c
29098 +++ b/crypto/cryptd.c
29099 @@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
29100
29101 struct cryptd_blkcipher_request_ctx {
29102 crypto_completion_t complete;
29103 -};
29104 +} __no_const;
29105
29106 struct cryptd_hash_ctx {
29107 struct crypto_shash *child;
29108 diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
29109 index a90d260..7a9765e 100644
29110 --- a/crypto/gf128mul.c
29111 +++ b/crypto/gf128mul.c
29112 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
29113 for (i = 0; i < 7; ++i)
29114 gf128mul_x_lle(&p[i + 1], &p[i]);
29115
29116 - memset(r, 0, sizeof(r));
29117 + memset(r, 0, sizeof(*r));
29118 for (i = 0;;) {
29119 u8 ch = ((u8 *)b)[15 - i];
29120
29121 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
29122 for (i = 0; i < 7; ++i)
29123 gf128mul_x_bbe(&p[i + 1], &p[i]);
29124
29125 - memset(r, 0, sizeof(r));
29126 + memset(r, 0, sizeof(*r));
29127 for (i = 0;;) {
29128 u8 ch = ((u8 *)b)[i];
29129
29130 diff --git a/crypto/serpent.c b/crypto/serpent.c
29131 index b651a55..023297d 100644
29132 --- a/crypto/serpent.c
29133 +++ b/crypto/serpent.c
29134 @@ -21,6 +21,7 @@
29135 #include <asm/byteorder.h>
29136 #include <linux/crypto.h>
29137 #include <linux/types.h>
29138 +#include <linux/sched.h>
29139
29140 /* Key is padded to the maximum of 256 bits before round key generation.
29141 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
29142 @@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
29143 u32 r0,r1,r2,r3,r4;
29144 int i;
29145
29146 + pax_track_stack();
29147 +
29148 /* Copy key, add padding */
29149
29150 for (i = 0; i < keylen; ++i)
29151 diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
29152 index 0d2cdb8..d8de48d 100644
29153 --- a/drivers/acpi/acpi_pad.c
29154 +++ b/drivers/acpi/acpi_pad.c
29155 @@ -30,7 +30,7 @@
29156 #include <acpi/acpi_bus.h>
29157 #include <acpi/acpi_drivers.h>
29158
29159 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
29160 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
29161 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
29162 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
29163 static DEFINE_MUTEX(isolated_cpus_lock);
29164 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
29165 index 3f4602b..1978af1 100644
29166 --- a/drivers/acpi/battery.c
29167 +++ b/drivers/acpi/battery.c
29168 @@ -678,6 +678,9 @@ static int acpi_battery_print_alarm(struct seq_file *seq, int result)
29169
29170 static ssize_t acpi_battery_write_alarm(struct file *file,
29171 const char __user * buffer,
29172 + size_t count, loff_t * ppos) __size_overflow(3);
29173 +static ssize_t acpi_battery_write_alarm(struct file *file,
29174 + const char __user * buffer,
29175 size_t count, loff_t * ppos)
29176 {
29177 int result = 0;
29178 @@ -763,7 +766,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
29179 }
29180
29181 static struct battery_file {
29182 - struct file_operations ops;
29183 + const struct file_operations ops;
29184 mode_t mode;
29185 const char *name;
29186 } acpi_battery_file[] = {
29187 diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
29188 index 7338b6a..82f0257 100644
29189 --- a/drivers/acpi/dock.c
29190 +++ b/drivers/acpi/dock.c
29191 @@ -77,7 +77,7 @@ struct dock_dependent_device {
29192 struct list_head list;
29193 struct list_head hotplug_list;
29194 acpi_handle handle;
29195 - struct acpi_dock_ops *ops;
29196 + const struct acpi_dock_ops *ops;
29197 void *context;
29198 };
29199
29200 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
29201 * the dock driver after _DCK is executed.
29202 */
29203 int
29204 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
29205 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
29206 void *context)
29207 {
29208 struct dock_dependent_device *dd;
29209 diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
29210 index 7c1c59e..2993595 100644
29211 --- a/drivers/acpi/osl.c
29212 +++ b/drivers/acpi/osl.c
29213 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
29214 void __iomem *virt_addr;
29215
29216 virt_addr = ioremap(phys_addr, width);
29217 + if (!virt_addr)
29218 + return AE_NO_MEMORY;
29219 if (!value)
29220 value = &dummy;
29221
29222 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
29223 void __iomem *virt_addr;
29224
29225 virt_addr = ioremap(phys_addr, width);
29226 + if (!virt_addr)
29227 + return AE_NO_MEMORY;
29228
29229 switch (width) {
29230 case 8:
29231 diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
29232 index c216062..eec10d2 100644
29233 --- a/drivers/acpi/power_meter.c
29234 +++ b/drivers/acpi/power_meter.c
29235 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29236 return res;
29237
29238 temp /= 1000;
29239 - if (temp < 0)
29240 - return -EINVAL;
29241
29242 mutex_lock(&resource->lock);
29243 resource->trip[attr->index - 7] = temp;
29244 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
29245 index d0d25e2..961643d 100644
29246 --- a/drivers/acpi/proc.c
29247 +++ b/drivers/acpi/proc.c
29248 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
29249 size_t count, loff_t * ppos)
29250 {
29251 struct list_head *node, *next;
29252 - char strbuf[5];
29253 - char str[5] = "";
29254 - unsigned int len = count;
29255 + char strbuf[5] = {0};
29256 struct acpi_device *found_dev = NULL;
29257
29258 - if (len > 4)
29259 - len = 4;
29260 - if (len < 0)
29261 - return -EFAULT;
29262 + if (count > 4)
29263 + count = 4;
29264
29265 - if (copy_from_user(strbuf, buffer, len))
29266 + if (copy_from_user(strbuf, buffer, count))
29267 return -EFAULT;
29268 - strbuf[len] = '\0';
29269 - sscanf(strbuf, "%s", str);
29270 + strbuf[count] = '\0';
29271
29272 mutex_lock(&acpi_device_lock);
29273 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
29274 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
29275 if (!dev->wakeup.flags.valid)
29276 continue;
29277
29278 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
29279 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
29280 dev->wakeup.state.enabled =
29281 dev->wakeup.state.enabled ? 0 : 1;
29282 found_dev = dev;
29283 diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
29284 index 7102474..de8ad22 100644
29285 --- a/drivers/acpi/processor_core.c
29286 +++ b/drivers/acpi/processor_core.c
29287 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
29288 return 0;
29289 }
29290
29291 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
29292 + BUG_ON(pr->id >= nr_cpu_ids);
29293
29294 /*
29295 * Buggy BIOS check
29296 diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
29297 index 52b9db8..a519aab 100644
29298 --- a/drivers/acpi/sbs.c
29299 +++ b/drivers/acpi/sbs.c
29300 @@ -647,6 +647,9 @@ static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
29301
29302 static ssize_t
29303 acpi_battery_write_alarm(struct file *file, const char __user * buffer,
29304 + size_t count, loff_t * ppos) __size_overflow(3);
29305 +static ssize_t
29306 +acpi_battery_write_alarm(struct file *file, const char __user * buffer,
29307 size_t count, loff_t * ppos)
29308 {
29309 struct seq_file *seq = file->private_data;
29310 diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
29311 index d933980..5761f13 100644
29312 --- a/drivers/acpi/sbshc.c
29313 +++ b/drivers/acpi/sbshc.c
29314 @@ -17,7 +17,7 @@
29315
29316 #define PREFIX "ACPI: "
29317
29318 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
29319 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
29320 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
29321
29322 struct acpi_smb_hc {
29323 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
29324 index 0458094..6978e7b 100644
29325 --- a/drivers/acpi/sleep.c
29326 +++ b/drivers/acpi/sleep.c
29327 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
29328 }
29329 }
29330
29331 -static struct platform_suspend_ops acpi_suspend_ops = {
29332 +static const struct platform_suspend_ops acpi_suspend_ops = {
29333 .valid = acpi_suspend_state_valid,
29334 .begin = acpi_suspend_begin,
29335 .prepare_late = acpi_pm_prepare,
29336 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
29337 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
29338 * been requested.
29339 */
29340 -static struct platform_suspend_ops acpi_suspend_ops_old = {
29341 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
29342 .valid = acpi_suspend_state_valid,
29343 .begin = acpi_suspend_begin_old,
29344 .prepare_late = acpi_pm_disable_gpes,
29345 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
29346 acpi_enable_all_runtime_gpes();
29347 }
29348
29349 -static struct platform_hibernation_ops acpi_hibernation_ops = {
29350 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
29351 .begin = acpi_hibernation_begin,
29352 .end = acpi_pm_end,
29353 .pre_snapshot = acpi_hibernation_pre_snapshot,
29354 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
29355 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
29356 * been requested.
29357 */
29358 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
29359 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
29360 .begin = acpi_hibernation_begin_old,
29361 .end = acpi_pm_end,
29362 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
29363 diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
29364 index 05dff63..b662ab7 100644
29365 --- a/drivers/acpi/video.c
29366 +++ b/drivers/acpi/video.c
29367 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
29368 vd->brightness->levels[request_level]);
29369 }
29370
29371 -static struct backlight_ops acpi_backlight_ops = {
29372 +static const struct backlight_ops acpi_backlight_ops = {
29373 .get_brightness = acpi_video_get_brightness,
29374 .update_status = acpi_video_set_brightness,
29375 };
29376 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
29377 index 6787aab..23ffb0e 100644
29378 --- a/drivers/ata/ahci.c
29379 +++ b/drivers/ata/ahci.c
29380 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
29381 .sdev_attrs = ahci_sdev_attrs,
29382 };
29383
29384 -static struct ata_port_operations ahci_ops = {
29385 +static const struct ata_port_operations ahci_ops = {
29386 .inherits = &sata_pmp_port_ops,
29387
29388 .qc_defer = sata_pmp_qc_defer_cmd_switch,
29389 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
29390 .port_stop = ahci_port_stop,
29391 };
29392
29393 -static struct ata_port_operations ahci_vt8251_ops = {
29394 +static const struct ata_port_operations ahci_vt8251_ops = {
29395 .inherits = &ahci_ops,
29396 .hardreset = ahci_vt8251_hardreset,
29397 };
29398
29399 -static struct ata_port_operations ahci_p5wdh_ops = {
29400 +static const struct ata_port_operations ahci_p5wdh_ops = {
29401 .inherits = &ahci_ops,
29402 .hardreset = ahci_p5wdh_hardreset,
29403 };
29404
29405 -static struct ata_port_operations ahci_sb600_ops = {
29406 +static const struct ata_port_operations ahci_sb600_ops = {
29407 .inherits = &ahci_ops,
29408 .softreset = ahci_sb600_softreset,
29409 .pmp_softreset = ahci_sb600_softreset,
29410 diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
29411 index 99e7196..4968c77 100644
29412 --- a/drivers/ata/ata_generic.c
29413 +++ b/drivers/ata/ata_generic.c
29414 @@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
29415 ATA_BMDMA_SHT(DRV_NAME),
29416 };
29417
29418 -static struct ata_port_operations generic_port_ops = {
29419 +static const struct ata_port_operations generic_port_ops = {
29420 .inherits = &ata_bmdma_port_ops,
29421 .cable_detect = ata_cable_unknown,
29422 .set_mode = generic_set_mode,
29423 diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
29424 index c33591d..000c121 100644
29425 --- a/drivers/ata/ata_piix.c
29426 +++ b/drivers/ata/ata_piix.c
29427 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
29428 ATA_BMDMA_SHT(DRV_NAME),
29429 };
29430
29431 -static struct ata_port_operations piix_pata_ops = {
29432 +static const struct ata_port_operations piix_pata_ops = {
29433 .inherits = &ata_bmdma32_port_ops,
29434 .cable_detect = ata_cable_40wire,
29435 .set_piomode = piix_set_piomode,
29436 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
29437 .prereset = piix_pata_prereset,
29438 };
29439
29440 -static struct ata_port_operations piix_vmw_ops = {
29441 +static const struct ata_port_operations piix_vmw_ops = {
29442 .inherits = &piix_pata_ops,
29443 .bmdma_status = piix_vmw_bmdma_status,
29444 };
29445
29446 -static struct ata_port_operations ich_pata_ops = {
29447 +static const struct ata_port_operations ich_pata_ops = {
29448 .inherits = &piix_pata_ops,
29449 .cable_detect = ich_pata_cable_detect,
29450 .set_dmamode = ich_set_dmamode,
29451 };
29452
29453 -static struct ata_port_operations piix_sata_ops = {
29454 +static const struct ata_port_operations piix_sata_ops = {
29455 .inherits = &ata_bmdma_port_ops,
29456 };
29457
29458 -static struct ata_port_operations piix_sidpr_sata_ops = {
29459 +static const struct ata_port_operations piix_sidpr_sata_ops = {
29460 .inherits = &piix_sata_ops,
29461 .hardreset = sata_std_hardreset,
29462 .scr_read = piix_sidpr_scr_read,
29463 diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
29464 index b0882cd..c295d65 100644
29465 --- a/drivers/ata/libata-acpi.c
29466 +++ b/drivers/ata/libata-acpi.c
29467 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
29468 ata_acpi_uevent(dev->link->ap, dev, event);
29469 }
29470
29471 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
29472 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
29473 .handler = ata_acpi_dev_notify_dock,
29474 .uevent = ata_acpi_dev_uevent,
29475 };
29476
29477 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
29478 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
29479 .handler = ata_acpi_ap_notify_dock,
29480 .uevent = ata_acpi_ap_uevent,
29481 };
29482 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
29483 index d4f7f99..94f603e 100644
29484 --- a/drivers/ata/libata-core.c
29485 +++ b/drivers/ata/libata-core.c
29486 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
29487 struct ata_port *ap;
29488 unsigned int tag;
29489
29490 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29491 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29492 ap = qc->ap;
29493
29494 qc->flags = 0;
29495 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
29496 struct ata_port *ap;
29497 struct ata_link *link;
29498
29499 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29500 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29501 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
29502 ap = qc->ap;
29503 link = qc->dev->link;
29504 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
29505 * LOCKING:
29506 * None.
29507 */
29508 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
29509 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
29510 {
29511 static DEFINE_SPINLOCK(lock);
29512 const struct ata_port_operations *cur;
29513 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
29514 return;
29515
29516 spin_lock(&lock);
29517 + pax_open_kernel();
29518
29519 for (cur = ops->inherits; cur; cur = cur->inherits) {
29520 void **inherit = (void **)cur;
29521 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
29522 if (IS_ERR(*pp))
29523 *pp = NULL;
29524
29525 - ops->inherits = NULL;
29526 + *(struct ata_port_operations **)&ops->inherits = NULL;
29527
29528 + pax_close_kernel();
29529 spin_unlock(&lock);
29530 }
29531
29532 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
29533 */
29534 /* KILLME - the only user left is ipr */
29535 void ata_host_init(struct ata_host *host, struct device *dev,
29536 - unsigned long flags, struct ata_port_operations *ops)
29537 + unsigned long flags, const struct ata_port_operations *ops)
29538 {
29539 spin_lock_init(&host->lock);
29540 host->dev = dev;
29541 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
29542 /* truly dummy */
29543 }
29544
29545 -struct ata_port_operations ata_dummy_port_ops = {
29546 +const struct ata_port_operations ata_dummy_port_ops = {
29547 .qc_prep = ata_noop_qc_prep,
29548 .qc_issue = ata_dummy_qc_issue,
29549 .error_handler = ata_dummy_error_handler,
29550 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
29551 index e5bdb9b..45a8e72 100644
29552 --- a/drivers/ata/libata-eh.c
29553 +++ b/drivers/ata/libata-eh.c
29554 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
29555 {
29556 struct ata_link *link;
29557
29558 + pax_track_stack();
29559 +
29560 ata_for_each_link(link, ap, HOST_FIRST)
29561 ata_eh_link_report(link);
29562 }
29563 @@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
29564 */
29565 void ata_std_error_handler(struct ata_port *ap)
29566 {
29567 - struct ata_port_operations *ops = ap->ops;
29568 + const struct ata_port_operations *ops = ap->ops;
29569 ata_reset_fn_t hardreset = ops->hardreset;
29570
29571 /* ignore built-in hardreset if SCR access is not available */
29572 diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
29573 index 51f0ffb..19ce3e3 100644
29574 --- a/drivers/ata/libata-pmp.c
29575 +++ b/drivers/ata/libata-pmp.c
29576 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
29577 */
29578 static int sata_pmp_eh_recover(struct ata_port *ap)
29579 {
29580 - struct ata_port_operations *ops = ap->ops;
29581 + const struct ata_port_operations *ops = ap->ops;
29582 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
29583 struct ata_link *pmp_link = &ap->link;
29584 struct ata_device *pmp_dev = pmp_link->device;
29585 diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
29586 index d8f35fe..288180a 100644
29587 --- a/drivers/ata/pata_acpi.c
29588 +++ b/drivers/ata/pata_acpi.c
29589 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
29590 ATA_BMDMA_SHT(DRV_NAME),
29591 };
29592
29593 -static struct ata_port_operations pacpi_ops = {
29594 +static const struct ata_port_operations pacpi_ops = {
29595 .inherits = &ata_bmdma_port_ops,
29596 .qc_issue = pacpi_qc_issue,
29597 .cable_detect = pacpi_cable_detect,
29598 diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
29599 index 9434114..1f2f364 100644
29600 --- a/drivers/ata/pata_ali.c
29601 +++ b/drivers/ata/pata_ali.c
29602 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
29603 * Port operations for PIO only ALi
29604 */
29605
29606 -static struct ata_port_operations ali_early_port_ops = {
29607 +static const struct ata_port_operations ali_early_port_ops = {
29608 .inherits = &ata_sff_port_ops,
29609 .cable_detect = ata_cable_40wire,
29610 .set_piomode = ali_set_piomode,
29611 @@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
29612 * Port operations for DMA capable ALi without cable
29613 * detect
29614 */
29615 -static struct ata_port_operations ali_20_port_ops = {
29616 +static const struct ata_port_operations ali_20_port_ops = {
29617 .inherits = &ali_dma_base_ops,
29618 .cable_detect = ata_cable_40wire,
29619 .mode_filter = ali_20_filter,
29620 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
29621 /*
29622 * Port operations for DMA capable ALi with cable detect
29623 */
29624 -static struct ata_port_operations ali_c2_port_ops = {
29625 +static const struct ata_port_operations ali_c2_port_ops = {
29626 .inherits = &ali_dma_base_ops,
29627 .check_atapi_dma = ali_check_atapi_dma,
29628 .cable_detect = ali_c2_cable_detect,
29629 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
29630 /*
29631 * Port operations for DMA capable ALi with cable detect
29632 */
29633 -static struct ata_port_operations ali_c4_port_ops = {
29634 +static const struct ata_port_operations ali_c4_port_ops = {
29635 .inherits = &ali_dma_base_ops,
29636 .check_atapi_dma = ali_check_atapi_dma,
29637 .cable_detect = ali_c2_cable_detect,
29638 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
29639 /*
29640 * Port operations for DMA capable ALi with cable detect and LBA48
29641 */
29642 -static struct ata_port_operations ali_c5_port_ops = {
29643 +static const struct ata_port_operations ali_c5_port_ops = {
29644 .inherits = &ali_dma_base_ops,
29645 .check_atapi_dma = ali_check_atapi_dma,
29646 .dev_config = ali_warn_atapi_dma,
29647 diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
29648 index 567f3f7..c8ee0da 100644
29649 --- a/drivers/ata/pata_amd.c
29650 +++ b/drivers/ata/pata_amd.c
29651 @@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
29652 .prereset = amd_pre_reset,
29653 };
29654
29655 -static struct ata_port_operations amd33_port_ops = {
29656 +static const struct ata_port_operations amd33_port_ops = {
29657 .inherits = &amd_base_port_ops,
29658 .cable_detect = ata_cable_40wire,
29659 .set_piomode = amd33_set_piomode,
29660 .set_dmamode = amd33_set_dmamode,
29661 };
29662
29663 -static struct ata_port_operations amd66_port_ops = {
29664 +static const struct ata_port_operations amd66_port_ops = {
29665 .inherits = &amd_base_port_ops,
29666 .cable_detect = ata_cable_unknown,
29667 .set_piomode = amd66_set_piomode,
29668 .set_dmamode = amd66_set_dmamode,
29669 };
29670
29671 -static struct ata_port_operations amd100_port_ops = {
29672 +static const struct ata_port_operations amd100_port_ops = {
29673 .inherits = &amd_base_port_ops,
29674 .cable_detect = ata_cable_unknown,
29675 .set_piomode = amd100_set_piomode,
29676 .set_dmamode = amd100_set_dmamode,
29677 };
29678
29679 -static struct ata_port_operations amd133_port_ops = {
29680 +static const struct ata_port_operations amd133_port_ops = {
29681 .inherits = &amd_base_port_ops,
29682 .cable_detect = amd_cable_detect,
29683 .set_piomode = amd133_set_piomode,
29684 @@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
29685 .host_stop = nv_host_stop,
29686 };
29687
29688 -static struct ata_port_operations nv100_port_ops = {
29689 +static const struct ata_port_operations nv100_port_ops = {
29690 .inherits = &nv_base_port_ops,
29691 .set_piomode = nv100_set_piomode,
29692 .set_dmamode = nv100_set_dmamode,
29693 };
29694
29695 -static struct ata_port_operations nv133_port_ops = {
29696 +static const struct ata_port_operations nv133_port_ops = {
29697 .inherits = &nv_base_port_ops,
29698 .set_piomode = nv133_set_piomode,
29699 .set_dmamode = nv133_set_dmamode,
29700 diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
29701 index d332cfd..4b7eaae 100644
29702 --- a/drivers/ata/pata_artop.c
29703 +++ b/drivers/ata/pata_artop.c
29704 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
29705 ATA_BMDMA_SHT(DRV_NAME),
29706 };
29707
29708 -static struct ata_port_operations artop6210_ops = {
29709 +static const struct ata_port_operations artop6210_ops = {
29710 .inherits = &ata_bmdma_port_ops,
29711 .cable_detect = ata_cable_40wire,
29712 .set_piomode = artop6210_set_piomode,
29713 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
29714 .qc_defer = artop6210_qc_defer,
29715 };
29716
29717 -static struct ata_port_operations artop6260_ops = {
29718 +static const struct ata_port_operations artop6260_ops = {
29719 .inherits = &ata_bmdma_port_ops,
29720 .cable_detect = artop6260_cable_detect,
29721 .set_piomode = artop6260_set_piomode,
29722 diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
29723 index 5c129f9..7bb7ccb 100644
29724 --- a/drivers/ata/pata_at32.c
29725 +++ b/drivers/ata/pata_at32.c
29726 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
29727 ATA_PIO_SHT(DRV_NAME),
29728 };
29729
29730 -static struct ata_port_operations at32_port_ops = {
29731 +static const struct ata_port_operations at32_port_ops = {
29732 .inherits = &ata_sff_port_ops,
29733 .cable_detect = ata_cable_40wire,
29734 .set_piomode = pata_at32_set_piomode,
29735 diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
29736 index 41c94b1..829006d 100644
29737 --- a/drivers/ata/pata_at91.c
29738 +++ b/drivers/ata/pata_at91.c
29739 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
29740 ATA_PIO_SHT(DRV_NAME),
29741 };
29742
29743 -static struct ata_port_operations pata_at91_port_ops = {
29744 +static const struct ata_port_operations pata_at91_port_ops = {
29745 .inherits = &ata_sff_port_ops,
29746
29747 .sff_data_xfer = pata_at91_data_xfer_noirq,
29748 diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
29749 index ae4454d..d391eb4 100644
29750 --- a/drivers/ata/pata_atiixp.c
29751 +++ b/drivers/ata/pata_atiixp.c
29752 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
29753 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29754 };
29755
29756 -static struct ata_port_operations atiixp_port_ops = {
29757 +static const struct ata_port_operations atiixp_port_ops = {
29758 .inherits = &ata_bmdma_port_ops,
29759
29760 .qc_prep = ata_sff_dumb_qc_prep,
29761 diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
29762 index 6fe7ded..2a425dc 100644
29763 --- a/drivers/ata/pata_atp867x.c
29764 +++ b/drivers/ata/pata_atp867x.c
29765 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
29766 ATA_BMDMA_SHT(DRV_NAME),
29767 };
29768
29769 -static struct ata_port_operations atp867x_ops = {
29770 +static const struct ata_port_operations atp867x_ops = {
29771 .inherits = &ata_bmdma_port_ops,
29772 .cable_detect = atp867x_cable_detect,
29773 .set_piomode = atp867x_set_piomode,
29774 diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
29775 index c4b47a3..b27a367 100644
29776 --- a/drivers/ata/pata_bf54x.c
29777 +++ b/drivers/ata/pata_bf54x.c
29778 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
29779 .dma_boundary = ATA_DMA_BOUNDARY,
29780 };
29781
29782 -static struct ata_port_operations bfin_pata_ops = {
29783 +static const struct ata_port_operations bfin_pata_ops = {
29784 .inherits = &ata_sff_port_ops,
29785
29786 .set_piomode = bfin_set_piomode,
29787 diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
29788 index 5acf9fa..84248be 100644
29789 --- a/drivers/ata/pata_cmd640.c
29790 +++ b/drivers/ata/pata_cmd640.c
29791 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
29792 ATA_BMDMA_SHT(DRV_NAME),
29793 };
29794
29795 -static struct ata_port_operations cmd640_port_ops = {
29796 +static const struct ata_port_operations cmd640_port_ops = {
29797 .inherits = &ata_bmdma_port_ops,
29798 /* In theory xfer_noirq is not needed once we kill the prefetcher */
29799 .sff_data_xfer = ata_sff_data_xfer_noirq,
29800 diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
29801 index ccd2694..c869c3d 100644
29802 --- a/drivers/ata/pata_cmd64x.c
29803 +++ b/drivers/ata/pata_cmd64x.c
29804 @@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
29805 .set_dmamode = cmd64x_set_dmamode,
29806 };
29807
29808 -static struct ata_port_operations cmd64x_port_ops = {
29809 +static const struct ata_port_operations cmd64x_port_ops = {
29810 .inherits = &cmd64x_base_ops,
29811 .cable_detect = ata_cable_40wire,
29812 };
29813
29814 -static struct ata_port_operations cmd646r1_port_ops = {
29815 +static const struct ata_port_operations cmd646r1_port_ops = {
29816 .inherits = &cmd64x_base_ops,
29817 .bmdma_stop = cmd646r1_bmdma_stop,
29818 .cable_detect = ata_cable_40wire,
29819 };
29820
29821 -static struct ata_port_operations cmd648_port_ops = {
29822 +static const struct ata_port_operations cmd648_port_ops = {
29823 .inherits = &cmd64x_base_ops,
29824 .bmdma_stop = cmd648_bmdma_stop,
29825 .cable_detect = cmd648_cable_detect,
29826 diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
29827 index 0df83cf..d7595b0 100644
29828 --- a/drivers/ata/pata_cs5520.c
29829 +++ b/drivers/ata/pata_cs5520.c
29830 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
29831 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29832 };
29833
29834 -static struct ata_port_operations cs5520_port_ops = {
29835 +static const struct ata_port_operations cs5520_port_ops = {
29836 .inherits = &ata_bmdma_port_ops,
29837 .qc_prep = ata_sff_dumb_qc_prep,
29838 .cable_detect = ata_cable_40wire,
29839 diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
29840 index c974b05..6d26b11 100644
29841 --- a/drivers/ata/pata_cs5530.c
29842 +++ b/drivers/ata/pata_cs5530.c
29843 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
29844 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29845 };
29846
29847 -static struct ata_port_operations cs5530_port_ops = {
29848 +static const struct ata_port_operations cs5530_port_ops = {
29849 .inherits = &ata_bmdma_port_ops,
29850
29851 .qc_prep = ata_sff_dumb_qc_prep,
29852 diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
29853 index 403f561..aacd26b 100644
29854 --- a/drivers/ata/pata_cs5535.c
29855 +++ b/drivers/ata/pata_cs5535.c
29856 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
29857 ATA_BMDMA_SHT(DRV_NAME),
29858 };
29859
29860 -static struct ata_port_operations cs5535_port_ops = {
29861 +static const struct ata_port_operations cs5535_port_ops = {
29862 .inherits = &ata_bmdma_port_ops,
29863 .cable_detect = cs5535_cable_detect,
29864 .set_piomode = cs5535_set_piomode,
29865 diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
29866 index 6da4cb4..de24a25 100644
29867 --- a/drivers/ata/pata_cs5536.c
29868 +++ b/drivers/ata/pata_cs5536.c
29869 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
29870 ATA_BMDMA_SHT(DRV_NAME),
29871 };
29872
29873 -static struct ata_port_operations cs5536_port_ops = {
29874 +static const struct ata_port_operations cs5536_port_ops = {
29875 .inherits = &ata_bmdma_port_ops,
29876 .cable_detect = cs5536_cable_detect,
29877 .set_piomode = cs5536_set_piomode,
29878 diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
29879 index 8fb040b..b16a9c9 100644
29880 --- a/drivers/ata/pata_cypress.c
29881 +++ b/drivers/ata/pata_cypress.c
29882 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
29883 ATA_BMDMA_SHT(DRV_NAME),
29884 };
29885
29886 -static struct ata_port_operations cy82c693_port_ops = {
29887 +static const struct ata_port_operations cy82c693_port_ops = {
29888 .inherits = &ata_bmdma_port_ops,
29889 .cable_detect = ata_cable_40wire,
29890 .set_piomode = cy82c693_set_piomode,
29891 diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
29892 index 2a6412f..555ee11 100644
29893 --- a/drivers/ata/pata_efar.c
29894 +++ b/drivers/ata/pata_efar.c
29895 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
29896 ATA_BMDMA_SHT(DRV_NAME),
29897 };
29898
29899 -static struct ata_port_operations efar_ops = {
29900 +static const struct ata_port_operations efar_ops = {
29901 .inherits = &ata_bmdma_port_ops,
29902 .cable_detect = efar_cable_detect,
29903 .set_piomode = efar_set_piomode,
29904 diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
29905 index b9d8836..0b92030 100644
29906 --- a/drivers/ata/pata_hpt366.c
29907 +++ b/drivers/ata/pata_hpt366.c
29908 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
29909 * Configuration for HPT366/68
29910 */
29911
29912 -static struct ata_port_operations hpt366_port_ops = {
29913 +static const struct ata_port_operations hpt366_port_ops = {
29914 .inherits = &ata_bmdma_port_ops,
29915 .cable_detect = hpt36x_cable_detect,
29916 .mode_filter = hpt366_filter,
29917 diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
29918 index 5af7f19..00c4980 100644
29919 --- a/drivers/ata/pata_hpt37x.c
29920 +++ b/drivers/ata/pata_hpt37x.c
29921 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
29922 * Configuration for HPT370
29923 */
29924
29925 -static struct ata_port_operations hpt370_port_ops = {
29926 +static const struct ata_port_operations hpt370_port_ops = {
29927 .inherits = &ata_bmdma_port_ops,
29928
29929 .bmdma_stop = hpt370_bmdma_stop,
29930 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
29931 * Configuration for HPT370A. Close to 370 but less filters
29932 */
29933
29934 -static struct ata_port_operations hpt370a_port_ops = {
29935 +static const struct ata_port_operations hpt370a_port_ops = {
29936 .inherits = &hpt370_port_ops,
29937 .mode_filter = hpt370a_filter,
29938 };
29939 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
29940 * and DMA mode setting functionality.
29941 */
29942
29943 -static struct ata_port_operations hpt372_port_ops = {
29944 +static const struct ata_port_operations hpt372_port_ops = {
29945 .inherits = &ata_bmdma_port_ops,
29946
29947 .bmdma_stop = hpt37x_bmdma_stop,
29948 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
29949 * but we have a different cable detection procedure for function 1.
29950 */
29951
29952 -static struct ata_port_operations hpt374_fn1_port_ops = {
29953 +static const struct ata_port_operations hpt374_fn1_port_ops = {
29954 .inherits = &hpt372_port_ops,
29955 .prereset = hpt374_fn1_pre_reset,
29956 };
29957 diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
29958 index 100f227..2e39382 100644
29959 --- a/drivers/ata/pata_hpt3x2n.c
29960 +++ b/drivers/ata/pata_hpt3x2n.c
29961 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
29962 * Configuration for HPT3x2n.
29963 */
29964
29965 -static struct ata_port_operations hpt3x2n_port_ops = {
29966 +static const struct ata_port_operations hpt3x2n_port_ops = {
29967 .inherits = &ata_bmdma_port_ops,
29968
29969 .bmdma_stop = hpt3x2n_bmdma_stop,
29970 diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
29971 index 7e31025..6fca8f4 100644
29972 --- a/drivers/ata/pata_hpt3x3.c
29973 +++ b/drivers/ata/pata_hpt3x3.c
29974 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
29975 ATA_BMDMA_SHT(DRV_NAME),
29976 };
29977
29978 -static struct ata_port_operations hpt3x3_port_ops = {
29979 +static const struct ata_port_operations hpt3x3_port_ops = {
29980 .inherits = &ata_bmdma_port_ops,
29981 .cable_detect = ata_cable_40wire,
29982 .set_piomode = hpt3x3_set_piomode,
29983 diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
29984 index b663b7f..9a26c2a 100644
29985 --- a/drivers/ata/pata_icside.c
29986 +++ b/drivers/ata/pata_icside.c
29987 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
29988 }
29989 }
29990
29991 -static struct ata_port_operations pata_icside_port_ops = {
29992 +static const struct ata_port_operations pata_icside_port_ops = {
29993 .inherits = &ata_sff_port_ops,
29994 /* no need to build any PRD tables for DMA */
29995 .qc_prep = ata_noop_qc_prep,
29996 diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
29997 index 4bceb88..457dfb6 100644
29998 --- a/drivers/ata/pata_isapnp.c
29999 +++ b/drivers/ata/pata_isapnp.c
30000 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
30001 ATA_PIO_SHT(DRV_NAME),
30002 };
30003
30004 -static struct ata_port_operations isapnp_port_ops = {
30005 +static const struct ata_port_operations isapnp_port_ops = {
30006 .inherits = &ata_sff_port_ops,
30007 .cable_detect = ata_cable_40wire,
30008 };
30009
30010 -static struct ata_port_operations isapnp_noalt_port_ops = {
30011 +static const struct ata_port_operations isapnp_noalt_port_ops = {
30012 .inherits = &ata_sff_port_ops,
30013 .cable_detect = ata_cable_40wire,
30014 /* No altstatus so we don't want to use the lost interrupt poll */
30015 diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
30016 index f156da8..24976e2 100644
30017 --- a/drivers/ata/pata_it8213.c
30018 +++ b/drivers/ata/pata_it8213.c
30019 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
30020 };
30021
30022
30023 -static struct ata_port_operations it8213_ops = {
30024 +static const struct ata_port_operations it8213_ops = {
30025 .inherits = &ata_bmdma_port_ops,
30026 .cable_detect = it8213_cable_detect,
30027 .set_piomode = it8213_set_piomode,
30028 diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
30029 index 188bc2f..ca9e785 100644
30030 --- a/drivers/ata/pata_it821x.c
30031 +++ b/drivers/ata/pata_it821x.c
30032 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
30033 ATA_BMDMA_SHT(DRV_NAME),
30034 };
30035
30036 -static struct ata_port_operations it821x_smart_port_ops = {
30037 +static const struct ata_port_operations it821x_smart_port_ops = {
30038 .inherits = &ata_bmdma_port_ops,
30039
30040 .check_atapi_dma= it821x_check_atapi_dma,
30041 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
30042 .port_start = it821x_port_start,
30043 };
30044
30045 -static struct ata_port_operations it821x_passthru_port_ops = {
30046 +static const struct ata_port_operations it821x_passthru_port_ops = {
30047 .inherits = &ata_bmdma_port_ops,
30048
30049 .check_atapi_dma= it821x_check_atapi_dma,
30050 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
30051 .port_start = it821x_port_start,
30052 };
30053
30054 -static struct ata_port_operations it821x_rdc_port_ops = {
30055 +static const struct ata_port_operations it821x_rdc_port_ops = {
30056 .inherits = &ata_bmdma_port_ops,
30057
30058 .check_atapi_dma= it821x_check_atapi_dma,
30059 diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
30060 index ba54b08..4b952b7 100644
30061 --- a/drivers/ata/pata_ixp4xx_cf.c
30062 +++ b/drivers/ata/pata_ixp4xx_cf.c
30063 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
30064 ATA_PIO_SHT(DRV_NAME),
30065 };
30066
30067 -static struct ata_port_operations ixp4xx_port_ops = {
30068 +static const struct ata_port_operations ixp4xx_port_ops = {
30069 .inherits = &ata_sff_port_ops,
30070 .sff_data_xfer = ixp4xx_mmio_data_xfer,
30071 .cable_detect = ata_cable_40wire,
30072 diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
30073 index 3a1474a..434b0ff 100644
30074 --- a/drivers/ata/pata_jmicron.c
30075 +++ b/drivers/ata/pata_jmicron.c
30076 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
30077 ATA_BMDMA_SHT(DRV_NAME),
30078 };
30079
30080 -static struct ata_port_operations jmicron_ops = {
30081 +static const struct ata_port_operations jmicron_ops = {
30082 .inherits = &ata_bmdma_port_ops,
30083 .prereset = jmicron_pre_reset,
30084 };
30085 diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
30086 index 6932e56..220e71d 100644
30087 --- a/drivers/ata/pata_legacy.c
30088 +++ b/drivers/ata/pata_legacy.c
30089 @@ -106,7 +106,7 @@ struct legacy_probe {
30090
30091 struct legacy_controller {
30092 const char *name;
30093 - struct ata_port_operations *ops;
30094 + const struct ata_port_operations *ops;
30095 unsigned int pio_mask;
30096 unsigned int flags;
30097 unsigned int pflags;
30098 @@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
30099 * pio_mask as well.
30100 */
30101
30102 -static struct ata_port_operations simple_port_ops = {
30103 +static const struct ata_port_operations simple_port_ops = {
30104 .inherits = &legacy_base_port_ops,
30105 .sff_data_xfer = ata_sff_data_xfer_noirq,
30106 };
30107
30108 -static struct ata_port_operations legacy_port_ops = {
30109 +static const struct ata_port_operations legacy_port_ops = {
30110 .inherits = &legacy_base_port_ops,
30111 .sff_data_xfer = ata_sff_data_xfer_noirq,
30112 .set_mode = legacy_set_mode,
30113 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
30114 return buflen;
30115 }
30116
30117 -static struct ata_port_operations pdc20230_port_ops = {
30118 +static const struct ata_port_operations pdc20230_port_ops = {
30119 .inherits = &legacy_base_port_ops,
30120 .set_piomode = pdc20230_set_piomode,
30121 .sff_data_xfer = pdc_data_xfer_vlb,
30122 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
30123 ioread8(ap->ioaddr.status_addr);
30124 }
30125
30126 -static struct ata_port_operations ht6560a_port_ops = {
30127 +static const struct ata_port_operations ht6560a_port_ops = {
30128 .inherits = &legacy_base_port_ops,
30129 .set_piomode = ht6560a_set_piomode,
30130 };
30131 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
30132 ioread8(ap->ioaddr.status_addr);
30133 }
30134
30135 -static struct ata_port_operations ht6560b_port_ops = {
30136 +static const struct ata_port_operations ht6560b_port_ops = {
30137 .inherits = &legacy_base_port_ops,
30138 .set_piomode = ht6560b_set_piomode,
30139 };
30140 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
30141 }
30142
30143
30144 -static struct ata_port_operations opti82c611a_port_ops = {
30145 +static const struct ata_port_operations opti82c611a_port_ops = {
30146 .inherits = &legacy_base_port_ops,
30147 .set_piomode = opti82c611a_set_piomode,
30148 };
30149 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
30150 return ata_sff_qc_issue(qc);
30151 }
30152
30153 -static struct ata_port_operations opti82c46x_port_ops = {
30154 +static const struct ata_port_operations opti82c46x_port_ops = {
30155 .inherits = &legacy_base_port_ops,
30156 .set_piomode = opti82c46x_set_piomode,
30157 .qc_issue = opti82c46x_qc_issue,
30158 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
30159 return 0;
30160 }
30161
30162 -static struct ata_port_operations qdi6500_port_ops = {
30163 +static const struct ata_port_operations qdi6500_port_ops = {
30164 .inherits = &legacy_base_port_ops,
30165 .set_piomode = qdi6500_set_piomode,
30166 .qc_issue = qdi_qc_issue,
30167 .sff_data_xfer = vlb32_data_xfer,
30168 };
30169
30170 -static struct ata_port_operations qdi6580_port_ops = {
30171 +static const struct ata_port_operations qdi6580_port_ops = {
30172 .inherits = &legacy_base_port_ops,
30173 .set_piomode = qdi6580_set_piomode,
30174 .sff_data_xfer = vlb32_data_xfer,
30175 };
30176
30177 -static struct ata_port_operations qdi6580dp_port_ops = {
30178 +static const struct ata_port_operations qdi6580dp_port_ops = {
30179 .inherits = &legacy_base_port_ops,
30180 .set_piomode = qdi6580dp_set_piomode,
30181 .sff_data_xfer = vlb32_data_xfer,
30182 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
30183 return 0;
30184 }
30185
30186 -static struct ata_port_operations winbond_port_ops = {
30187 +static const struct ata_port_operations winbond_port_ops = {
30188 .inherits = &legacy_base_port_ops,
30189 .set_piomode = winbond_set_piomode,
30190 .sff_data_xfer = vlb32_data_xfer,
30191 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
30192 int pio_modes = controller->pio_mask;
30193 unsigned long io = probe->port;
30194 u32 mask = (1 << probe->slot);
30195 - struct ata_port_operations *ops = controller->ops;
30196 + const struct ata_port_operations *ops = controller->ops;
30197 struct legacy_data *ld = &legacy_data[probe->slot];
30198 struct ata_host *host = NULL;
30199 struct ata_port *ap;
30200 diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
30201 index 2096fb7..4d090fc 100644
30202 --- a/drivers/ata/pata_marvell.c
30203 +++ b/drivers/ata/pata_marvell.c
30204 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
30205 ATA_BMDMA_SHT(DRV_NAME),
30206 };
30207
30208 -static struct ata_port_operations marvell_ops = {
30209 +static const struct ata_port_operations marvell_ops = {
30210 .inherits = &ata_bmdma_port_ops,
30211 .cable_detect = marvell_cable_detect,
30212 .prereset = marvell_pre_reset,
30213 diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
30214 index 99d41be..7d56aa8 100644
30215 --- a/drivers/ata/pata_mpc52xx.c
30216 +++ b/drivers/ata/pata_mpc52xx.c
30217 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
30218 ATA_PIO_SHT(DRV_NAME),
30219 };
30220
30221 -static struct ata_port_operations mpc52xx_ata_port_ops = {
30222 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
30223 .inherits = &ata_bmdma_port_ops,
30224 .sff_dev_select = mpc52xx_ata_dev_select,
30225 .set_piomode = mpc52xx_ata_set_piomode,
30226 diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
30227 index b21f002..0a27e7f 100644
30228 --- a/drivers/ata/pata_mpiix.c
30229 +++ b/drivers/ata/pata_mpiix.c
30230 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
30231 ATA_PIO_SHT(DRV_NAME),
30232 };
30233
30234 -static struct ata_port_operations mpiix_port_ops = {
30235 +static const struct ata_port_operations mpiix_port_ops = {
30236 .inherits = &ata_sff_port_ops,
30237 .qc_issue = mpiix_qc_issue,
30238 .cable_detect = ata_cable_40wire,
30239 diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
30240 index f0d52f7..89c3be3 100644
30241 --- a/drivers/ata/pata_netcell.c
30242 +++ b/drivers/ata/pata_netcell.c
30243 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
30244 ATA_BMDMA_SHT(DRV_NAME),
30245 };
30246
30247 -static struct ata_port_operations netcell_ops = {
30248 +static const struct ata_port_operations netcell_ops = {
30249 .inherits = &ata_bmdma_port_ops,
30250 .cable_detect = ata_cable_80wire,
30251 .read_id = netcell_read_id,
30252 diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
30253 index dd53a66..a3f4317 100644
30254 --- a/drivers/ata/pata_ninja32.c
30255 +++ b/drivers/ata/pata_ninja32.c
30256 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
30257 ATA_BMDMA_SHT(DRV_NAME),
30258 };
30259
30260 -static struct ata_port_operations ninja32_port_ops = {
30261 +static const struct ata_port_operations ninja32_port_ops = {
30262 .inherits = &ata_bmdma_port_ops,
30263 .sff_dev_select = ninja32_dev_select,
30264 .cable_detect = ata_cable_40wire,
30265 diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
30266 index ca53fac..9aa93ef 100644
30267 --- a/drivers/ata/pata_ns87410.c
30268 +++ b/drivers/ata/pata_ns87410.c
30269 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
30270 ATA_PIO_SHT(DRV_NAME),
30271 };
30272
30273 -static struct ata_port_operations ns87410_port_ops = {
30274 +static const struct ata_port_operations ns87410_port_ops = {
30275 .inherits = &ata_sff_port_ops,
30276 .qc_issue = ns87410_qc_issue,
30277 .cable_detect = ata_cable_40wire,
30278 diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
30279 index 773b159..55f454e 100644
30280 --- a/drivers/ata/pata_ns87415.c
30281 +++ b/drivers/ata/pata_ns87415.c
30282 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
30283 }
30284 #endif /* 87560 SuperIO Support */
30285
30286 -static struct ata_port_operations ns87415_pata_ops = {
30287 +static const struct ata_port_operations ns87415_pata_ops = {
30288 .inherits = &ata_bmdma_port_ops,
30289
30290 .check_atapi_dma = ns87415_check_atapi_dma,
30291 @@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
30292 };
30293
30294 #if defined(CONFIG_SUPERIO)
30295 -static struct ata_port_operations ns87560_pata_ops = {
30296 +static const struct ata_port_operations ns87560_pata_ops = {
30297 .inherits = &ns87415_pata_ops,
30298 .sff_tf_read = ns87560_tf_read,
30299 .sff_check_status = ns87560_check_status,
30300 diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
30301 index d6f6956..639295b 100644
30302 --- a/drivers/ata/pata_octeon_cf.c
30303 +++ b/drivers/ata/pata_octeon_cf.c
30304 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
30305 return 0;
30306 }
30307
30308 +/* cannot be const */
30309 static struct ata_port_operations octeon_cf_ops = {
30310 .inherits = &ata_sff_port_ops,
30311 .check_atapi_dma = octeon_cf_check_atapi_dma,
30312 diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
30313 index 84ac503..adee1cd 100644
30314 --- a/drivers/ata/pata_oldpiix.c
30315 +++ b/drivers/ata/pata_oldpiix.c
30316 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
30317 ATA_BMDMA_SHT(DRV_NAME),
30318 };
30319
30320 -static struct ata_port_operations oldpiix_pata_ops = {
30321 +static const struct ata_port_operations oldpiix_pata_ops = {
30322 .inherits = &ata_bmdma_port_ops,
30323 .qc_issue = oldpiix_qc_issue,
30324 .cable_detect = ata_cable_40wire,
30325 diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
30326 index 99eddda..3a4c0aa 100644
30327 --- a/drivers/ata/pata_opti.c
30328 +++ b/drivers/ata/pata_opti.c
30329 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
30330 ATA_PIO_SHT(DRV_NAME),
30331 };
30332
30333 -static struct ata_port_operations opti_port_ops = {
30334 +static const struct ata_port_operations opti_port_ops = {
30335 .inherits = &ata_sff_port_ops,
30336 .cable_detect = ata_cable_40wire,
30337 .set_piomode = opti_set_piomode,
30338 diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
30339 index 86885a4..8e9968d 100644
30340 --- a/drivers/ata/pata_optidma.c
30341 +++ b/drivers/ata/pata_optidma.c
30342 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
30343 ATA_BMDMA_SHT(DRV_NAME),
30344 };
30345
30346 -static struct ata_port_operations optidma_port_ops = {
30347 +static const struct ata_port_operations optidma_port_ops = {
30348 .inherits = &ata_bmdma_port_ops,
30349 .cable_detect = ata_cable_40wire,
30350 .set_piomode = optidma_set_pio_mode,
30351 @@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
30352 .prereset = optidma_pre_reset,
30353 };
30354
30355 -static struct ata_port_operations optiplus_port_ops = {
30356 +static const struct ata_port_operations optiplus_port_ops = {
30357 .inherits = &optidma_port_ops,
30358 .set_piomode = optiplus_set_pio_mode,
30359 .set_dmamode = optiplus_set_dma_mode,
30360 diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
30361 index 11fb4cc..1a14022 100644
30362 --- a/drivers/ata/pata_palmld.c
30363 +++ b/drivers/ata/pata_palmld.c
30364 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
30365 ATA_PIO_SHT(DRV_NAME),
30366 };
30367
30368 -static struct ata_port_operations palmld_port_ops = {
30369 +static const struct ata_port_operations palmld_port_ops = {
30370 .inherits = &ata_sff_port_ops,
30371 .sff_data_xfer = ata_sff_data_xfer_noirq,
30372 .cable_detect = ata_cable_40wire,
30373 diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
30374 index dc99e26..7f4b1e4 100644
30375 --- a/drivers/ata/pata_pcmcia.c
30376 +++ b/drivers/ata/pata_pcmcia.c
30377 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
30378 ATA_PIO_SHT(DRV_NAME),
30379 };
30380
30381 -static struct ata_port_operations pcmcia_port_ops = {
30382 +static const struct ata_port_operations pcmcia_port_ops = {
30383 .inherits = &ata_sff_port_ops,
30384 .sff_data_xfer = ata_sff_data_xfer_noirq,
30385 .cable_detect = ata_cable_40wire,
30386 .set_mode = pcmcia_set_mode,
30387 };
30388
30389 -static struct ata_port_operations pcmcia_8bit_port_ops = {
30390 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
30391 .inherits = &ata_sff_port_ops,
30392 .sff_data_xfer = ata_data_xfer_8bit,
30393 .cable_detect = ata_cable_40wire,
30394 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
30395 unsigned long io_base, ctl_base;
30396 void __iomem *io_addr, *ctl_addr;
30397 int n_ports = 1;
30398 - struct ata_port_operations *ops = &pcmcia_port_ops;
30399 + const struct ata_port_operations *ops = &pcmcia_port_ops;
30400
30401 info = kzalloc(sizeof(*info), GFP_KERNEL);
30402 if (info == NULL)
30403 diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
30404 index ca5cad0..3a1f125 100644
30405 --- a/drivers/ata/pata_pdc2027x.c
30406 +++ b/drivers/ata/pata_pdc2027x.c
30407 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
30408 ATA_BMDMA_SHT(DRV_NAME),
30409 };
30410
30411 -static struct ata_port_operations pdc2027x_pata100_ops = {
30412 +static const struct ata_port_operations pdc2027x_pata100_ops = {
30413 .inherits = &ata_bmdma_port_ops,
30414 .check_atapi_dma = pdc2027x_check_atapi_dma,
30415 .cable_detect = pdc2027x_cable_detect,
30416 .prereset = pdc2027x_prereset,
30417 };
30418
30419 -static struct ata_port_operations pdc2027x_pata133_ops = {
30420 +static const struct ata_port_operations pdc2027x_pata133_ops = {
30421 .inherits = &pdc2027x_pata100_ops,
30422 .mode_filter = pdc2027x_mode_filter,
30423 .set_piomode = pdc2027x_set_piomode,
30424 diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
30425 index 2911120..4bf62aa 100644
30426 --- a/drivers/ata/pata_pdc202xx_old.c
30427 +++ b/drivers/ata/pata_pdc202xx_old.c
30428 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
30429 ATA_BMDMA_SHT(DRV_NAME),
30430 };
30431
30432 -static struct ata_port_operations pdc2024x_port_ops = {
30433 +static const struct ata_port_operations pdc2024x_port_ops = {
30434 .inherits = &ata_bmdma_port_ops,
30435
30436 .cable_detect = ata_cable_40wire,
30437 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
30438 .sff_exec_command = pdc202xx_exec_command,
30439 };
30440
30441 -static struct ata_port_operations pdc2026x_port_ops = {
30442 +static const struct ata_port_operations pdc2026x_port_ops = {
30443 .inherits = &pdc2024x_port_ops,
30444
30445 .check_atapi_dma = pdc2026x_check_atapi_dma,
30446 diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
30447 index 3f6ebc6..a18c358 100644
30448 --- a/drivers/ata/pata_platform.c
30449 +++ b/drivers/ata/pata_platform.c
30450 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
30451 ATA_PIO_SHT(DRV_NAME),
30452 };
30453
30454 -static struct ata_port_operations pata_platform_port_ops = {
30455 +static const struct ata_port_operations pata_platform_port_ops = {
30456 .inherits = &ata_sff_port_ops,
30457 .sff_data_xfer = ata_sff_data_xfer_noirq,
30458 .cable_detect = ata_cable_unknown,
30459 diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
30460 index 45879dc..165a9f9 100644
30461 --- a/drivers/ata/pata_qdi.c
30462 +++ b/drivers/ata/pata_qdi.c
30463 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
30464 ATA_PIO_SHT(DRV_NAME),
30465 };
30466
30467 -static struct ata_port_operations qdi6500_port_ops = {
30468 +static const struct ata_port_operations qdi6500_port_ops = {
30469 .inherits = &ata_sff_port_ops,
30470 .qc_issue = qdi_qc_issue,
30471 .sff_data_xfer = qdi_data_xfer,
30472 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
30473 .set_piomode = qdi6500_set_piomode,
30474 };
30475
30476 -static struct ata_port_operations qdi6580_port_ops = {
30477 +static const struct ata_port_operations qdi6580_port_ops = {
30478 .inherits = &qdi6500_port_ops,
30479 .set_piomode = qdi6580_set_piomode,
30480 };
30481 diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
30482 index 4401b33..716c5cc 100644
30483 --- a/drivers/ata/pata_radisys.c
30484 +++ b/drivers/ata/pata_radisys.c
30485 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
30486 ATA_BMDMA_SHT(DRV_NAME),
30487 };
30488
30489 -static struct ata_port_operations radisys_pata_ops = {
30490 +static const struct ata_port_operations radisys_pata_ops = {
30491 .inherits = &ata_bmdma_port_ops,
30492 .qc_issue = radisys_qc_issue,
30493 .cable_detect = ata_cable_unknown,
30494 diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
30495 index 45f1e10..fab6bca 100644
30496 --- a/drivers/ata/pata_rb532_cf.c
30497 +++ b/drivers/ata/pata_rb532_cf.c
30498 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
30499 return IRQ_HANDLED;
30500 }
30501
30502 -static struct ata_port_operations rb532_pata_port_ops = {
30503 +static const struct ata_port_operations rb532_pata_port_ops = {
30504 .inherits = &ata_sff_port_ops,
30505 .sff_data_xfer = ata_sff_data_xfer32,
30506 };
30507 diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
30508 index c843a1e..b5853c3 100644
30509 --- a/drivers/ata/pata_rdc.c
30510 +++ b/drivers/ata/pata_rdc.c
30511 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
30512 pci_write_config_byte(dev, 0x48, udma_enable);
30513 }
30514
30515 -static struct ata_port_operations rdc_pata_ops = {
30516 +static const struct ata_port_operations rdc_pata_ops = {
30517 .inherits = &ata_bmdma32_port_ops,
30518 .cable_detect = rdc_pata_cable_detect,
30519 .set_piomode = rdc_set_piomode,
30520 diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
30521 index a5e4dfe..080c8c9 100644
30522 --- a/drivers/ata/pata_rz1000.c
30523 +++ b/drivers/ata/pata_rz1000.c
30524 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
30525 ATA_PIO_SHT(DRV_NAME),
30526 };
30527
30528 -static struct ata_port_operations rz1000_port_ops = {
30529 +static const struct ata_port_operations rz1000_port_ops = {
30530 .inherits = &ata_sff_port_ops,
30531 .cable_detect = ata_cable_40wire,
30532 .set_mode = rz1000_set_mode,
30533 diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
30534 index 3bbed83..e309daf 100644
30535 --- a/drivers/ata/pata_sc1200.c
30536 +++ b/drivers/ata/pata_sc1200.c
30537 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
30538 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
30539 };
30540
30541 -static struct ata_port_operations sc1200_port_ops = {
30542 +static const struct ata_port_operations sc1200_port_ops = {
30543 .inherits = &ata_bmdma_port_ops,
30544 .qc_prep = ata_sff_dumb_qc_prep,
30545 .qc_issue = sc1200_qc_issue,
30546 diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
30547 index 4257d6b..4c1d9d5 100644
30548 --- a/drivers/ata/pata_scc.c
30549 +++ b/drivers/ata/pata_scc.c
30550 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
30551 ATA_BMDMA_SHT(DRV_NAME),
30552 };
30553
30554 -static struct ata_port_operations scc_pata_ops = {
30555 +static const struct ata_port_operations scc_pata_ops = {
30556 .inherits = &ata_bmdma_port_ops,
30557
30558 .set_piomode = scc_set_piomode,
30559 diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
30560 index 99cceb45..e2e0a87 100644
30561 --- a/drivers/ata/pata_sch.c
30562 +++ b/drivers/ata/pata_sch.c
30563 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
30564 ATA_BMDMA_SHT(DRV_NAME),
30565 };
30566
30567 -static struct ata_port_operations sch_pata_ops = {
30568 +static const struct ata_port_operations sch_pata_ops = {
30569 .inherits = &ata_bmdma_port_ops,
30570 .cable_detect = ata_cable_unknown,
30571 .set_piomode = sch_set_piomode,
30572 diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
30573 index beaed12..39969f1 100644
30574 --- a/drivers/ata/pata_serverworks.c
30575 +++ b/drivers/ata/pata_serverworks.c
30576 @@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
30577 ATA_BMDMA_SHT(DRV_NAME),
30578 };
30579
30580 -static struct ata_port_operations serverworks_osb4_port_ops = {
30581 +static const struct ata_port_operations serverworks_osb4_port_ops = {
30582 .inherits = &ata_bmdma_port_ops,
30583 .cable_detect = serverworks_cable_detect,
30584 .mode_filter = serverworks_osb4_filter,
30585 @@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
30586 .set_dmamode = serverworks_set_dmamode,
30587 };
30588
30589 -static struct ata_port_operations serverworks_csb_port_ops = {
30590 +static const struct ata_port_operations serverworks_csb_port_ops = {
30591 .inherits = &serverworks_osb4_port_ops,
30592 .mode_filter = serverworks_csb_filter,
30593 };
30594 diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
30595 index a2ace48..0463b44 100644
30596 --- a/drivers/ata/pata_sil680.c
30597 +++ b/drivers/ata/pata_sil680.c
30598 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
30599 ATA_BMDMA_SHT(DRV_NAME),
30600 };
30601
30602 -static struct ata_port_operations sil680_port_ops = {
30603 +static const struct ata_port_operations sil680_port_ops = {
30604 .inherits = &ata_bmdma32_port_ops,
30605 .cable_detect = sil680_cable_detect,
30606 .set_piomode = sil680_set_piomode,
30607 diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
30608 index 488e77b..b3724d5 100644
30609 --- a/drivers/ata/pata_sis.c
30610 +++ b/drivers/ata/pata_sis.c
30611 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
30612 ATA_BMDMA_SHT(DRV_NAME),
30613 };
30614
30615 -static struct ata_port_operations sis_133_for_sata_ops = {
30616 +static const struct ata_port_operations sis_133_for_sata_ops = {
30617 .inherits = &ata_bmdma_port_ops,
30618 .set_piomode = sis_133_set_piomode,
30619 .set_dmamode = sis_133_set_dmamode,
30620 .cable_detect = sis_133_cable_detect,
30621 };
30622
30623 -static struct ata_port_operations sis_base_ops = {
30624 +static const struct ata_port_operations sis_base_ops = {
30625 .inherits = &ata_bmdma_port_ops,
30626 .prereset = sis_pre_reset,
30627 };
30628
30629 -static struct ata_port_operations sis_133_ops = {
30630 +static const struct ata_port_operations sis_133_ops = {
30631 .inherits = &sis_base_ops,
30632 .set_piomode = sis_133_set_piomode,
30633 .set_dmamode = sis_133_set_dmamode,
30634 .cable_detect = sis_133_cable_detect,
30635 };
30636
30637 -static struct ata_port_operations sis_133_early_ops = {
30638 +static const struct ata_port_operations sis_133_early_ops = {
30639 .inherits = &sis_base_ops,
30640 .set_piomode = sis_100_set_piomode,
30641 .set_dmamode = sis_133_early_set_dmamode,
30642 .cable_detect = sis_66_cable_detect,
30643 };
30644
30645 -static struct ata_port_operations sis_100_ops = {
30646 +static const struct ata_port_operations sis_100_ops = {
30647 .inherits = &sis_base_ops,
30648 .set_piomode = sis_100_set_piomode,
30649 .set_dmamode = sis_100_set_dmamode,
30650 .cable_detect = sis_66_cable_detect,
30651 };
30652
30653 -static struct ata_port_operations sis_66_ops = {
30654 +static const struct ata_port_operations sis_66_ops = {
30655 .inherits = &sis_base_ops,
30656 .set_piomode = sis_old_set_piomode,
30657 .set_dmamode = sis_66_set_dmamode,
30658 .cable_detect = sis_66_cable_detect,
30659 };
30660
30661 -static struct ata_port_operations sis_old_ops = {
30662 +static const struct ata_port_operations sis_old_ops = {
30663 .inherits = &sis_base_ops,
30664 .set_piomode = sis_old_set_piomode,
30665 .set_dmamode = sis_old_set_dmamode,
30666 diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
30667 index 29f733c..43e9ca0 100644
30668 --- a/drivers/ata/pata_sl82c105.c
30669 +++ b/drivers/ata/pata_sl82c105.c
30670 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
30671 ATA_BMDMA_SHT(DRV_NAME),
30672 };
30673
30674 -static struct ata_port_operations sl82c105_port_ops = {
30675 +static const struct ata_port_operations sl82c105_port_ops = {
30676 .inherits = &ata_bmdma_port_ops,
30677 .qc_defer = sl82c105_qc_defer,
30678 .bmdma_start = sl82c105_bmdma_start,
30679 diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
30680 index f1f13ff..df39e99 100644
30681 --- a/drivers/ata/pata_triflex.c
30682 +++ b/drivers/ata/pata_triflex.c
30683 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
30684 ATA_BMDMA_SHT(DRV_NAME),
30685 };
30686
30687 -static struct ata_port_operations triflex_port_ops = {
30688 +static const struct ata_port_operations triflex_port_ops = {
30689 .inherits = &ata_bmdma_port_ops,
30690 .bmdma_start = triflex_bmdma_start,
30691 .bmdma_stop = triflex_bmdma_stop,
30692 diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
30693 index 1d73b8d..98a4b29 100644
30694 --- a/drivers/ata/pata_via.c
30695 +++ b/drivers/ata/pata_via.c
30696 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
30697 ATA_BMDMA_SHT(DRV_NAME),
30698 };
30699
30700 -static struct ata_port_operations via_port_ops = {
30701 +static const struct ata_port_operations via_port_ops = {
30702 .inherits = &ata_bmdma_port_ops,
30703 .cable_detect = via_cable_detect,
30704 .set_piomode = via_set_piomode,
30705 @@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
30706 .port_start = via_port_start,
30707 };
30708
30709 -static struct ata_port_operations via_port_ops_noirq = {
30710 +static const struct ata_port_operations via_port_ops_noirq = {
30711 .inherits = &via_port_ops,
30712 .sff_data_xfer = ata_sff_data_xfer_noirq,
30713 };
30714 diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
30715 index 6d8619b..ad511c4 100644
30716 --- a/drivers/ata/pata_winbond.c
30717 +++ b/drivers/ata/pata_winbond.c
30718 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
30719 ATA_PIO_SHT(DRV_NAME),
30720 };
30721
30722 -static struct ata_port_operations winbond_port_ops = {
30723 +static const struct ata_port_operations winbond_port_ops = {
30724 .inherits = &ata_sff_port_ops,
30725 .sff_data_xfer = winbond_data_xfer,
30726 .cable_detect = ata_cable_40wire,
30727 diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
30728 index 6c65b07..f996ec7 100644
30729 --- a/drivers/ata/pdc_adma.c
30730 +++ b/drivers/ata/pdc_adma.c
30731 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
30732 .dma_boundary = ADMA_DMA_BOUNDARY,
30733 };
30734
30735 -static struct ata_port_operations adma_ata_ops = {
30736 +static const struct ata_port_operations adma_ata_ops = {
30737 .inherits = &ata_sff_port_ops,
30738
30739 .lost_interrupt = ATA_OP_NULL,
30740 diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
30741 index 172b57e..c49bc1e 100644
30742 --- a/drivers/ata/sata_fsl.c
30743 +++ b/drivers/ata/sata_fsl.c
30744 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
30745 .dma_boundary = ATA_DMA_BOUNDARY,
30746 };
30747
30748 -static struct ata_port_operations sata_fsl_ops = {
30749 +static const struct ata_port_operations sata_fsl_ops = {
30750 .inherits = &sata_pmp_port_ops,
30751
30752 .qc_defer = ata_std_qc_defer,
30753 diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
30754 index 4406902..60603ef 100644
30755 --- a/drivers/ata/sata_inic162x.c
30756 +++ b/drivers/ata/sata_inic162x.c
30757 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
30758 return 0;
30759 }
30760
30761 -static struct ata_port_operations inic_port_ops = {
30762 +static const struct ata_port_operations inic_port_ops = {
30763 .inherits = &sata_port_ops,
30764
30765 .check_atapi_dma = inic_check_atapi_dma,
30766 diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
30767 index cf41126..8107be6 100644
30768 --- a/drivers/ata/sata_mv.c
30769 +++ b/drivers/ata/sata_mv.c
30770 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
30771 .dma_boundary = MV_DMA_BOUNDARY,
30772 };
30773
30774 -static struct ata_port_operations mv5_ops = {
30775 +static const struct ata_port_operations mv5_ops = {
30776 .inherits = &ata_sff_port_ops,
30777
30778 .lost_interrupt = ATA_OP_NULL,
30779 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
30780 .port_stop = mv_port_stop,
30781 };
30782
30783 -static struct ata_port_operations mv6_ops = {
30784 +static const struct ata_port_operations mv6_ops = {
30785 .inherits = &mv5_ops,
30786 .dev_config = mv6_dev_config,
30787 .scr_read = mv_scr_read,
30788 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
30789 .bmdma_status = mv_bmdma_status,
30790 };
30791
30792 -static struct ata_port_operations mv_iie_ops = {
30793 +static const struct ata_port_operations mv_iie_ops = {
30794 .inherits = &mv6_ops,
30795 .dev_config = ATA_OP_NULL,
30796 .qc_prep = mv_qc_prep_iie,
30797 diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
30798 index ae2297c..d5c9c33 100644
30799 --- a/drivers/ata/sata_nv.c
30800 +++ b/drivers/ata/sata_nv.c
30801 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
30802 * cases. Define nv_hardreset() which only kicks in for post-boot
30803 * probing and use it for all variants.
30804 */
30805 -static struct ata_port_operations nv_generic_ops = {
30806 +static const struct ata_port_operations nv_generic_ops = {
30807 .inherits = &ata_bmdma_port_ops,
30808 .lost_interrupt = ATA_OP_NULL,
30809 .scr_read = nv_scr_read,
30810 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
30811 .hardreset = nv_hardreset,
30812 };
30813
30814 -static struct ata_port_operations nv_nf2_ops = {
30815 +static const struct ata_port_operations nv_nf2_ops = {
30816 .inherits = &nv_generic_ops,
30817 .freeze = nv_nf2_freeze,
30818 .thaw = nv_nf2_thaw,
30819 };
30820
30821 -static struct ata_port_operations nv_ck804_ops = {
30822 +static const struct ata_port_operations nv_ck804_ops = {
30823 .inherits = &nv_generic_ops,
30824 .freeze = nv_ck804_freeze,
30825 .thaw = nv_ck804_thaw,
30826 .host_stop = nv_ck804_host_stop,
30827 };
30828
30829 -static struct ata_port_operations nv_adma_ops = {
30830 +static const struct ata_port_operations nv_adma_ops = {
30831 .inherits = &nv_ck804_ops,
30832
30833 .check_atapi_dma = nv_adma_check_atapi_dma,
30834 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
30835 .host_stop = nv_adma_host_stop,
30836 };
30837
30838 -static struct ata_port_operations nv_swncq_ops = {
30839 +static const struct ata_port_operations nv_swncq_ops = {
30840 .inherits = &nv_generic_ops,
30841
30842 .qc_defer = ata_std_qc_defer,
30843 diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
30844 index 07d8d00..6cc70bb 100644
30845 --- a/drivers/ata/sata_promise.c
30846 +++ b/drivers/ata/sata_promise.c
30847 @@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
30848 .error_handler = pdc_error_handler,
30849 };
30850
30851 -static struct ata_port_operations pdc_sata_ops = {
30852 +static const struct ata_port_operations pdc_sata_ops = {
30853 .inherits = &pdc_common_ops,
30854 .cable_detect = pdc_sata_cable_detect,
30855 .freeze = pdc_sata_freeze,
30856 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
30857
30858 /* First-generation chips need a more restrictive ->check_atapi_dma op,
30859 and ->freeze/thaw that ignore the hotplug controls. */
30860 -static struct ata_port_operations pdc_old_sata_ops = {
30861 +static const struct ata_port_operations pdc_old_sata_ops = {
30862 .inherits = &pdc_sata_ops,
30863 .freeze = pdc_freeze,
30864 .thaw = pdc_thaw,
30865 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
30866 };
30867
30868 -static struct ata_port_operations pdc_pata_ops = {
30869 +static const struct ata_port_operations pdc_pata_ops = {
30870 .inherits = &pdc_common_ops,
30871 .cable_detect = pdc_pata_cable_detect,
30872 .freeze = pdc_freeze,
30873 diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
30874 index 326c0cf..36ecebe 100644
30875 --- a/drivers/ata/sata_qstor.c
30876 +++ b/drivers/ata/sata_qstor.c
30877 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
30878 .dma_boundary = QS_DMA_BOUNDARY,
30879 };
30880
30881 -static struct ata_port_operations qs_ata_ops = {
30882 +static const struct ata_port_operations qs_ata_ops = {
30883 .inherits = &ata_sff_port_ops,
30884
30885 .check_atapi_dma = qs_check_atapi_dma,
30886 diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
30887 index 3cb69d5..0871d3c 100644
30888 --- a/drivers/ata/sata_sil.c
30889 +++ b/drivers/ata/sata_sil.c
30890 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
30891 .sg_tablesize = ATA_MAX_PRD
30892 };
30893
30894 -static struct ata_port_operations sil_ops = {
30895 +static const struct ata_port_operations sil_ops = {
30896 .inherits = &ata_bmdma32_port_ops,
30897 .dev_config = sil_dev_config,
30898 .set_mode = sil_set_mode,
30899 diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
30900 index e6946fc..eddb794 100644
30901 --- a/drivers/ata/sata_sil24.c
30902 +++ b/drivers/ata/sata_sil24.c
30903 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
30904 .dma_boundary = ATA_DMA_BOUNDARY,
30905 };
30906
30907 -static struct ata_port_operations sil24_ops = {
30908 +static const struct ata_port_operations sil24_ops = {
30909 .inherits = &sata_pmp_port_ops,
30910
30911 .qc_defer = sil24_qc_defer,
30912 diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
30913 index f8a91bf..9cb06b6 100644
30914 --- a/drivers/ata/sata_sis.c
30915 +++ b/drivers/ata/sata_sis.c
30916 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
30917 ATA_BMDMA_SHT(DRV_NAME),
30918 };
30919
30920 -static struct ata_port_operations sis_ops = {
30921 +static const struct ata_port_operations sis_ops = {
30922 .inherits = &ata_bmdma_port_ops,
30923 .scr_read = sis_scr_read,
30924 .scr_write = sis_scr_write,
30925 diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
30926 index 7257f2d..d04c6f5 100644
30927 --- a/drivers/ata/sata_svw.c
30928 +++ b/drivers/ata/sata_svw.c
30929 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
30930 };
30931
30932
30933 -static struct ata_port_operations k2_sata_ops = {
30934 +static const struct ata_port_operations k2_sata_ops = {
30935 .inherits = &ata_bmdma_port_ops,
30936 .sff_tf_load = k2_sata_tf_load,
30937 .sff_tf_read = k2_sata_tf_read,
30938 diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
30939 index bbcf970..cd0df0d 100644
30940 --- a/drivers/ata/sata_sx4.c
30941 +++ b/drivers/ata/sata_sx4.c
30942 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
30943 };
30944
30945 /* TODO: inherit from base port_ops after converting to new EH */
30946 -static struct ata_port_operations pdc_20621_ops = {
30947 +static const struct ata_port_operations pdc_20621_ops = {
30948 .inherits = &ata_sff_port_ops,
30949
30950 .check_atapi_dma = pdc_check_atapi_dma,
30951 diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
30952 index e5bff47..089d859 100644
30953 --- a/drivers/ata/sata_uli.c
30954 +++ b/drivers/ata/sata_uli.c
30955 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
30956 ATA_BMDMA_SHT(DRV_NAME),
30957 };
30958
30959 -static struct ata_port_operations uli_ops = {
30960 +static const struct ata_port_operations uli_ops = {
30961 .inherits = &ata_bmdma_port_ops,
30962 .scr_read = uli_scr_read,
30963 .scr_write = uli_scr_write,
30964 diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
30965 index f5dcca7..77b94eb 100644
30966 --- a/drivers/ata/sata_via.c
30967 +++ b/drivers/ata/sata_via.c
30968 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
30969 ATA_BMDMA_SHT(DRV_NAME),
30970 };
30971
30972 -static struct ata_port_operations svia_base_ops = {
30973 +static const struct ata_port_operations svia_base_ops = {
30974 .inherits = &ata_bmdma_port_ops,
30975 .sff_tf_load = svia_tf_load,
30976 };
30977
30978 -static struct ata_port_operations vt6420_sata_ops = {
30979 +static const struct ata_port_operations vt6420_sata_ops = {
30980 .inherits = &svia_base_ops,
30981 .freeze = svia_noop_freeze,
30982 .prereset = vt6420_prereset,
30983 .bmdma_start = vt6420_bmdma_start,
30984 };
30985
30986 -static struct ata_port_operations vt6421_pata_ops = {
30987 +static const struct ata_port_operations vt6421_pata_ops = {
30988 .inherits = &svia_base_ops,
30989 .cable_detect = vt6421_pata_cable_detect,
30990 .set_piomode = vt6421_set_pio_mode,
30991 .set_dmamode = vt6421_set_dma_mode,
30992 };
30993
30994 -static struct ata_port_operations vt6421_sata_ops = {
30995 +static const struct ata_port_operations vt6421_sata_ops = {
30996 .inherits = &svia_base_ops,
30997 .scr_read = svia_scr_read,
30998 .scr_write = svia_scr_write,
30999 };
31000
31001 -static struct ata_port_operations vt8251_ops = {
31002 +static const struct ata_port_operations vt8251_ops = {
31003 .inherits = &svia_base_ops,
31004 .hardreset = sata_std_hardreset,
31005 .scr_read = vt8251_scr_read,
31006 diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
31007 index 8b2a278..51e65d3 100644
31008 --- a/drivers/ata/sata_vsc.c
31009 +++ b/drivers/ata/sata_vsc.c
31010 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
31011 };
31012
31013
31014 -static struct ata_port_operations vsc_sata_ops = {
31015 +static const struct ata_port_operations vsc_sata_ops = {
31016 .inherits = &ata_bmdma_port_ops,
31017 /* The IRQ handling is not quite standard SFF behaviour so we
31018 cannot use the default lost interrupt handler */
31019 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
31020 index 5effec6..7e4019a 100644
31021 --- a/drivers/atm/adummy.c
31022 +++ b/drivers/atm/adummy.c
31023 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
31024 vcc->pop(vcc, skb);
31025 else
31026 dev_kfree_skb_any(skb);
31027 - atomic_inc(&vcc->stats->tx);
31028 + atomic_inc_unchecked(&vcc->stats->tx);
31029
31030 return 0;
31031 }
31032 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
31033 index 66e1813..26a27c6 100644
31034 --- a/drivers/atm/ambassador.c
31035 +++ b/drivers/atm/ambassador.c
31036 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
31037 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
31038
31039 // VC layer stats
31040 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31041 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31042
31043 // free the descriptor
31044 kfree (tx_descr);
31045 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
31046 dump_skb ("<<<", vc, skb);
31047
31048 // VC layer stats
31049 - atomic_inc(&atm_vcc->stats->rx);
31050 + atomic_inc_unchecked(&atm_vcc->stats->rx);
31051 __net_timestamp(skb);
31052 // end of our responsability
31053 atm_vcc->push (atm_vcc, skb);
31054 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
31055 } else {
31056 PRINTK (KERN_INFO, "dropped over-size frame");
31057 // should we count this?
31058 - atomic_inc(&atm_vcc->stats->rx_drop);
31059 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31060 }
31061
31062 } else {
31063 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
31064 }
31065
31066 if (check_area (skb->data, skb->len)) {
31067 - atomic_inc(&atm_vcc->stats->tx_err);
31068 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
31069 return -ENOMEM; // ?
31070 }
31071
31072 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
31073 index 02ad83d..6daffeb 100644
31074 --- a/drivers/atm/atmtcp.c
31075 +++ b/drivers/atm/atmtcp.c
31076 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31077 if (vcc->pop) vcc->pop(vcc,skb);
31078 else dev_kfree_skb(skb);
31079 if (dev_data) return 0;
31080 - atomic_inc(&vcc->stats->tx_err);
31081 + atomic_inc_unchecked(&vcc->stats->tx_err);
31082 return -ENOLINK;
31083 }
31084 size = skb->len+sizeof(struct atmtcp_hdr);
31085 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31086 if (!new_skb) {
31087 if (vcc->pop) vcc->pop(vcc,skb);
31088 else dev_kfree_skb(skb);
31089 - atomic_inc(&vcc->stats->tx_err);
31090 + atomic_inc_unchecked(&vcc->stats->tx_err);
31091 return -ENOBUFS;
31092 }
31093 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
31094 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31095 if (vcc->pop) vcc->pop(vcc,skb);
31096 else dev_kfree_skb(skb);
31097 out_vcc->push(out_vcc,new_skb);
31098 - atomic_inc(&vcc->stats->tx);
31099 - atomic_inc(&out_vcc->stats->rx);
31100 + atomic_inc_unchecked(&vcc->stats->tx);
31101 + atomic_inc_unchecked(&out_vcc->stats->rx);
31102 return 0;
31103 }
31104
31105 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
31106 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
31107 read_unlock(&vcc_sklist_lock);
31108 if (!out_vcc) {
31109 - atomic_inc(&vcc->stats->tx_err);
31110 + atomic_inc_unchecked(&vcc->stats->tx_err);
31111 goto done;
31112 }
31113 skb_pull(skb,sizeof(struct atmtcp_hdr));
31114 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
31115 __net_timestamp(new_skb);
31116 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
31117 out_vcc->push(out_vcc,new_skb);
31118 - atomic_inc(&vcc->stats->tx);
31119 - atomic_inc(&out_vcc->stats->rx);
31120 + atomic_inc_unchecked(&vcc->stats->tx);
31121 + atomic_inc_unchecked(&out_vcc->stats->rx);
31122 done:
31123 if (vcc->pop) vcc->pop(vcc,skb);
31124 else dev_kfree_skb(skb);
31125 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
31126 index 0c30261..3da356e 100644
31127 --- a/drivers/atm/eni.c
31128 +++ b/drivers/atm/eni.c
31129 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
31130 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
31131 vcc->dev->number);
31132 length = 0;
31133 - atomic_inc(&vcc->stats->rx_err);
31134 + atomic_inc_unchecked(&vcc->stats->rx_err);
31135 }
31136 else {
31137 length = ATM_CELL_SIZE-1; /* no HEC */
31138 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
31139 size);
31140 }
31141 eff = length = 0;
31142 - atomic_inc(&vcc->stats->rx_err);
31143 + atomic_inc_unchecked(&vcc->stats->rx_err);
31144 }
31145 else {
31146 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
31147 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
31148 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
31149 vcc->dev->number,vcc->vci,length,size << 2,descr);
31150 length = eff = 0;
31151 - atomic_inc(&vcc->stats->rx_err);
31152 + atomic_inc_unchecked(&vcc->stats->rx_err);
31153 }
31154 }
31155 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
31156 @@ -770,7 +770,7 @@ rx_dequeued++;
31157 vcc->push(vcc,skb);
31158 pushed++;
31159 }
31160 - atomic_inc(&vcc->stats->rx);
31161 + atomic_inc_unchecked(&vcc->stats->rx);
31162 }
31163 wake_up(&eni_dev->rx_wait);
31164 }
31165 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
31166 PCI_DMA_TODEVICE);
31167 if (vcc->pop) vcc->pop(vcc,skb);
31168 else dev_kfree_skb_irq(skb);
31169 - atomic_inc(&vcc->stats->tx);
31170 + atomic_inc_unchecked(&vcc->stats->tx);
31171 wake_up(&eni_dev->tx_wait);
31172 dma_complete++;
31173 }
31174 @@ -1570,7 +1570,7 @@ tx_complete++;
31175 /*--------------------------------- entries ---------------------------------*/
31176
31177
31178 -static const char *media_name[] __devinitdata = {
31179 +static const char *media_name[] __devinitconst = {
31180 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
31181 "UTP", "05?", "06?", "07?", /* 4- 7 */
31182 "TAXI","09?", "10?", "11?", /* 8-11 */
31183 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
31184 index cd5049a..a51209f 100644
31185 --- a/drivers/atm/firestream.c
31186 +++ b/drivers/atm/firestream.c
31187 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
31188 }
31189 }
31190
31191 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31192 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31193
31194 fs_dprintk (FS_DEBUG_TXMEM, "i");
31195 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
31196 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
31197 #endif
31198 skb_put (skb, qe->p1 & 0xffff);
31199 ATM_SKB(skb)->vcc = atm_vcc;
31200 - atomic_inc(&atm_vcc->stats->rx);
31201 + atomic_inc_unchecked(&atm_vcc->stats->rx);
31202 __net_timestamp(skb);
31203 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
31204 atm_vcc->push (atm_vcc, skb);
31205 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
31206 kfree (pe);
31207 }
31208 if (atm_vcc)
31209 - atomic_inc(&atm_vcc->stats->rx_drop);
31210 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31211 break;
31212 case 0x1f: /* Reassembly abort: no buffers. */
31213 /* Silently increment error counter. */
31214 if (atm_vcc)
31215 - atomic_inc(&atm_vcc->stats->rx_drop);
31216 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31217 break;
31218 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
31219 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
31220 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
31221 index f766cc4..a34002e 100644
31222 --- a/drivers/atm/fore200e.c
31223 +++ b/drivers/atm/fore200e.c
31224 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
31225 #endif
31226 /* check error condition */
31227 if (*entry->status & STATUS_ERROR)
31228 - atomic_inc(&vcc->stats->tx_err);
31229 + atomic_inc_unchecked(&vcc->stats->tx_err);
31230 else
31231 - atomic_inc(&vcc->stats->tx);
31232 + atomic_inc_unchecked(&vcc->stats->tx);
31233 }
31234 }
31235
31236 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
31237 if (skb == NULL) {
31238 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
31239
31240 - atomic_inc(&vcc->stats->rx_drop);
31241 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31242 return -ENOMEM;
31243 }
31244
31245 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
31246
31247 dev_kfree_skb_any(skb);
31248
31249 - atomic_inc(&vcc->stats->rx_drop);
31250 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31251 return -ENOMEM;
31252 }
31253
31254 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
31255
31256 vcc->push(vcc, skb);
31257 - atomic_inc(&vcc->stats->rx);
31258 + atomic_inc_unchecked(&vcc->stats->rx);
31259
31260 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
31261
31262 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
31263 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
31264 fore200e->atm_dev->number,
31265 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
31266 - atomic_inc(&vcc->stats->rx_err);
31267 + atomic_inc_unchecked(&vcc->stats->rx_err);
31268 }
31269 }
31270
31271 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
31272 goto retry_here;
31273 }
31274
31275 - atomic_inc(&vcc->stats->tx_err);
31276 + atomic_inc_unchecked(&vcc->stats->tx_err);
31277
31278 fore200e->tx_sat++;
31279 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
31280 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
31281 index 7066703..2b130de 100644
31282 --- a/drivers/atm/he.c
31283 +++ b/drivers/atm/he.c
31284 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31285
31286 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
31287 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
31288 - atomic_inc(&vcc->stats->rx_drop);
31289 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31290 goto return_host_buffers;
31291 }
31292
31293 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31294 RBRQ_LEN_ERR(he_dev->rbrq_head)
31295 ? "LEN_ERR" : "",
31296 vcc->vpi, vcc->vci);
31297 - atomic_inc(&vcc->stats->rx_err);
31298 + atomic_inc_unchecked(&vcc->stats->rx_err);
31299 goto return_host_buffers;
31300 }
31301
31302 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31303 vcc->push(vcc, skb);
31304 spin_lock(&he_dev->global_lock);
31305
31306 - atomic_inc(&vcc->stats->rx);
31307 + atomic_inc_unchecked(&vcc->stats->rx);
31308
31309 return_host_buffers:
31310 ++pdus_assembled;
31311 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
31312 tpd->vcc->pop(tpd->vcc, tpd->skb);
31313 else
31314 dev_kfree_skb_any(tpd->skb);
31315 - atomic_inc(&tpd->vcc->stats->tx_err);
31316 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
31317 }
31318 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
31319 return;
31320 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31321 vcc->pop(vcc, skb);
31322 else
31323 dev_kfree_skb_any(skb);
31324 - atomic_inc(&vcc->stats->tx_err);
31325 + atomic_inc_unchecked(&vcc->stats->tx_err);
31326 return -EINVAL;
31327 }
31328
31329 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31330 vcc->pop(vcc, skb);
31331 else
31332 dev_kfree_skb_any(skb);
31333 - atomic_inc(&vcc->stats->tx_err);
31334 + atomic_inc_unchecked(&vcc->stats->tx_err);
31335 return -EINVAL;
31336 }
31337 #endif
31338 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31339 vcc->pop(vcc, skb);
31340 else
31341 dev_kfree_skb_any(skb);
31342 - atomic_inc(&vcc->stats->tx_err);
31343 + atomic_inc_unchecked(&vcc->stats->tx_err);
31344 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31345 return -ENOMEM;
31346 }
31347 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31348 vcc->pop(vcc, skb);
31349 else
31350 dev_kfree_skb_any(skb);
31351 - atomic_inc(&vcc->stats->tx_err);
31352 + atomic_inc_unchecked(&vcc->stats->tx_err);
31353 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31354 return -ENOMEM;
31355 }
31356 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31357 __enqueue_tpd(he_dev, tpd, cid);
31358 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31359
31360 - atomic_inc(&vcc->stats->tx);
31361 + atomic_inc_unchecked(&vcc->stats->tx);
31362
31363 return 0;
31364 }
31365 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
31366 index 4e49021..01b1512 100644
31367 --- a/drivers/atm/horizon.c
31368 +++ b/drivers/atm/horizon.c
31369 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
31370 {
31371 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
31372 // VC layer stats
31373 - atomic_inc(&vcc->stats->rx);
31374 + atomic_inc_unchecked(&vcc->stats->rx);
31375 __net_timestamp(skb);
31376 // end of our responsability
31377 vcc->push (vcc, skb);
31378 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
31379 dev->tx_iovec = NULL;
31380
31381 // VC layer stats
31382 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31383 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31384
31385 // free the skb
31386 hrz_kfree_skb (skb);
31387 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
31388 index e33ae00..9deb4ab 100644
31389 --- a/drivers/atm/idt77252.c
31390 +++ b/drivers/atm/idt77252.c
31391 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
31392 else
31393 dev_kfree_skb(skb);
31394
31395 - atomic_inc(&vcc->stats->tx);
31396 + atomic_inc_unchecked(&vcc->stats->tx);
31397 }
31398
31399 atomic_dec(&scq->used);
31400 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31401 if ((sb = dev_alloc_skb(64)) == NULL) {
31402 printk("%s: Can't allocate buffers for aal0.\n",
31403 card->name);
31404 - atomic_add(i, &vcc->stats->rx_drop);
31405 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
31406 break;
31407 }
31408 if (!atm_charge(vcc, sb->truesize)) {
31409 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
31410 card->name);
31411 - atomic_add(i - 1, &vcc->stats->rx_drop);
31412 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
31413 dev_kfree_skb(sb);
31414 break;
31415 }
31416 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31417 ATM_SKB(sb)->vcc = vcc;
31418 __net_timestamp(sb);
31419 vcc->push(vcc, sb);
31420 - atomic_inc(&vcc->stats->rx);
31421 + atomic_inc_unchecked(&vcc->stats->rx);
31422
31423 cell += ATM_CELL_PAYLOAD;
31424 }
31425 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31426 "(CDC: %08x)\n",
31427 card->name, len, rpp->len, readl(SAR_REG_CDC));
31428 recycle_rx_pool_skb(card, rpp);
31429 - atomic_inc(&vcc->stats->rx_err);
31430 + atomic_inc_unchecked(&vcc->stats->rx_err);
31431 return;
31432 }
31433 if (stat & SAR_RSQE_CRC) {
31434 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
31435 recycle_rx_pool_skb(card, rpp);
31436 - atomic_inc(&vcc->stats->rx_err);
31437 + atomic_inc_unchecked(&vcc->stats->rx_err);
31438 return;
31439 }
31440 if (skb_queue_len(&rpp->queue) > 1) {
31441 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31442 RXPRINTK("%s: Can't alloc RX skb.\n",
31443 card->name);
31444 recycle_rx_pool_skb(card, rpp);
31445 - atomic_inc(&vcc->stats->rx_err);
31446 + atomic_inc_unchecked(&vcc->stats->rx_err);
31447 return;
31448 }
31449 if (!atm_charge(vcc, skb->truesize)) {
31450 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31451 __net_timestamp(skb);
31452
31453 vcc->push(vcc, skb);
31454 - atomic_inc(&vcc->stats->rx);
31455 + atomic_inc_unchecked(&vcc->stats->rx);
31456
31457 return;
31458 }
31459 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31460 __net_timestamp(skb);
31461
31462 vcc->push(vcc, skb);
31463 - atomic_inc(&vcc->stats->rx);
31464 + atomic_inc_unchecked(&vcc->stats->rx);
31465
31466 if (skb->truesize > SAR_FB_SIZE_3)
31467 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
31468 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
31469 if (vcc->qos.aal != ATM_AAL0) {
31470 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
31471 card->name, vpi, vci);
31472 - atomic_inc(&vcc->stats->rx_drop);
31473 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31474 goto drop;
31475 }
31476
31477 if ((sb = dev_alloc_skb(64)) == NULL) {
31478 printk("%s: Can't allocate buffers for AAL0.\n",
31479 card->name);
31480 - atomic_inc(&vcc->stats->rx_err);
31481 + atomic_inc_unchecked(&vcc->stats->rx_err);
31482 goto drop;
31483 }
31484
31485 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
31486 ATM_SKB(sb)->vcc = vcc;
31487 __net_timestamp(sb);
31488 vcc->push(vcc, sb);
31489 - atomic_inc(&vcc->stats->rx);
31490 + atomic_inc_unchecked(&vcc->stats->rx);
31491
31492 drop:
31493 skb_pull(queue, 64);
31494 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
31495
31496 if (vc == NULL) {
31497 printk("%s: NULL connection in send().\n", card->name);
31498 - atomic_inc(&vcc->stats->tx_err);
31499 + atomic_inc_unchecked(&vcc->stats->tx_err);
31500 dev_kfree_skb(skb);
31501 return -EINVAL;
31502 }
31503 if (!test_bit(VCF_TX, &vc->flags)) {
31504 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
31505 - atomic_inc(&vcc->stats->tx_err);
31506 + atomic_inc_unchecked(&vcc->stats->tx_err);
31507 dev_kfree_skb(skb);
31508 return -EINVAL;
31509 }
31510 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
31511 break;
31512 default:
31513 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
31514 - atomic_inc(&vcc->stats->tx_err);
31515 + atomic_inc_unchecked(&vcc->stats->tx_err);
31516 dev_kfree_skb(skb);
31517 return -EINVAL;
31518 }
31519
31520 if (skb_shinfo(skb)->nr_frags != 0) {
31521 printk("%s: No scatter-gather yet.\n", card->name);
31522 - atomic_inc(&vcc->stats->tx_err);
31523 + atomic_inc_unchecked(&vcc->stats->tx_err);
31524 dev_kfree_skb(skb);
31525 return -EINVAL;
31526 }
31527 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
31528
31529 err = queue_skb(card, vc, skb, oam);
31530 if (err) {
31531 - atomic_inc(&vcc->stats->tx_err);
31532 + atomic_inc_unchecked(&vcc->stats->tx_err);
31533 dev_kfree_skb(skb);
31534 return err;
31535 }
31536 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
31537 skb = dev_alloc_skb(64);
31538 if (!skb) {
31539 printk("%s: Out of memory in send_oam().\n", card->name);
31540 - atomic_inc(&vcc->stats->tx_err);
31541 + atomic_inc_unchecked(&vcc->stats->tx_err);
31542 return -ENOMEM;
31543 }
31544 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
31545 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
31546 index b2c1b37..faa672b 100644
31547 --- a/drivers/atm/iphase.c
31548 +++ b/drivers/atm/iphase.c
31549 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
31550 status = (u_short) (buf_desc_ptr->desc_mode);
31551 if (status & (RX_CER | RX_PTE | RX_OFL))
31552 {
31553 - atomic_inc(&vcc->stats->rx_err);
31554 + atomic_inc_unchecked(&vcc->stats->rx_err);
31555 IF_ERR(printk("IA: bad packet, dropping it");)
31556 if (status & RX_CER) {
31557 IF_ERR(printk(" cause: packet CRC error\n");)
31558 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
31559 len = dma_addr - buf_addr;
31560 if (len > iadev->rx_buf_sz) {
31561 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
31562 - atomic_inc(&vcc->stats->rx_err);
31563 + atomic_inc_unchecked(&vcc->stats->rx_err);
31564 goto out_free_desc;
31565 }
31566
31567 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31568 ia_vcc = INPH_IA_VCC(vcc);
31569 if (ia_vcc == NULL)
31570 {
31571 - atomic_inc(&vcc->stats->rx_err);
31572 + atomic_inc_unchecked(&vcc->stats->rx_err);
31573 dev_kfree_skb_any(skb);
31574 atm_return(vcc, atm_guess_pdu2truesize(len));
31575 goto INCR_DLE;
31576 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31577 if ((length > iadev->rx_buf_sz) || (length >
31578 (skb->len - sizeof(struct cpcs_trailer))))
31579 {
31580 - atomic_inc(&vcc->stats->rx_err);
31581 + atomic_inc_unchecked(&vcc->stats->rx_err);
31582 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
31583 length, skb->len);)
31584 dev_kfree_skb_any(skb);
31585 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31586
31587 IF_RX(printk("rx_dle_intr: skb push");)
31588 vcc->push(vcc,skb);
31589 - atomic_inc(&vcc->stats->rx);
31590 + atomic_inc_unchecked(&vcc->stats->rx);
31591 iadev->rx_pkt_cnt++;
31592 }
31593 INCR_DLE:
31594 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
31595 {
31596 struct k_sonet_stats *stats;
31597 stats = &PRIV(_ia_dev[board])->sonet_stats;
31598 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
31599 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
31600 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
31601 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
31602 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
31603 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
31604 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
31605 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
31606 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
31607 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
31608 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
31609 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
31610 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
31611 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
31612 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
31613 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
31614 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
31615 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
31616 }
31617 ia_cmds.status = 0;
31618 break;
31619 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
31620 if ((desc == 0) || (desc > iadev->num_tx_desc))
31621 {
31622 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
31623 - atomic_inc(&vcc->stats->tx);
31624 + atomic_inc_unchecked(&vcc->stats->tx);
31625 if (vcc->pop)
31626 vcc->pop(vcc, skb);
31627 else
31628 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
31629 ATM_DESC(skb) = vcc->vci;
31630 skb_queue_tail(&iadev->tx_dma_q, skb);
31631
31632 - atomic_inc(&vcc->stats->tx);
31633 + atomic_inc_unchecked(&vcc->stats->tx);
31634 iadev->tx_pkt_cnt++;
31635 /* Increment transaction counter */
31636 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
31637
31638 #if 0
31639 /* add flow control logic */
31640 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
31641 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
31642 if (iavcc->vc_desc_cnt > 10) {
31643 vcc->tx_quota = vcc->tx_quota * 3 / 4;
31644 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
31645 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
31646 index cf97c34..8d30655 100644
31647 --- a/drivers/atm/lanai.c
31648 +++ b/drivers/atm/lanai.c
31649 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
31650 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
31651 lanai_endtx(lanai, lvcc);
31652 lanai_free_skb(lvcc->tx.atmvcc, skb);
31653 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
31654 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
31655 }
31656
31657 /* Try to fill the buffer - don't call unless there is backlog */
31658 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
31659 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
31660 __net_timestamp(skb);
31661 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
31662 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
31663 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
31664 out:
31665 lvcc->rx.buf.ptr = end;
31666 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
31667 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31668 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
31669 "vcc %d\n", lanai->number, (unsigned int) s, vci);
31670 lanai->stats.service_rxnotaal5++;
31671 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31672 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31673 return 0;
31674 }
31675 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
31676 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31677 int bytes;
31678 read_unlock(&vcc_sklist_lock);
31679 DPRINTK("got trashed rx pdu on vci %d\n", vci);
31680 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31681 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31682 lvcc->stats.x.aal5.service_trash++;
31683 bytes = (SERVICE_GET_END(s) * 16) -
31684 (((unsigned long) lvcc->rx.buf.ptr) -
31685 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31686 }
31687 if (s & SERVICE_STREAM) {
31688 read_unlock(&vcc_sklist_lock);
31689 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31690 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31691 lvcc->stats.x.aal5.service_stream++;
31692 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
31693 "PDU on VCI %d!\n", lanai->number, vci);
31694 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31695 return 0;
31696 }
31697 DPRINTK("got rx crc error on vci %d\n", vci);
31698 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31699 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31700 lvcc->stats.x.aal5.service_rxcrc++;
31701 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
31702 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
31703 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
31704 index 3da804b..d3b0eed 100644
31705 --- a/drivers/atm/nicstar.c
31706 +++ b/drivers/atm/nicstar.c
31707 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31708 if ((vc = (vc_map *) vcc->dev_data) == NULL)
31709 {
31710 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
31711 - atomic_inc(&vcc->stats->tx_err);
31712 + atomic_inc_unchecked(&vcc->stats->tx_err);
31713 dev_kfree_skb_any(skb);
31714 return -EINVAL;
31715 }
31716 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31717 if (!vc->tx)
31718 {
31719 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
31720 - atomic_inc(&vcc->stats->tx_err);
31721 + atomic_inc_unchecked(&vcc->stats->tx_err);
31722 dev_kfree_skb_any(skb);
31723 return -EINVAL;
31724 }
31725 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31726 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
31727 {
31728 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
31729 - atomic_inc(&vcc->stats->tx_err);
31730 + atomic_inc_unchecked(&vcc->stats->tx_err);
31731 dev_kfree_skb_any(skb);
31732 return -EINVAL;
31733 }
31734 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31735 if (skb_shinfo(skb)->nr_frags != 0)
31736 {
31737 printk("nicstar%d: No scatter-gather yet.\n", card->index);
31738 - atomic_inc(&vcc->stats->tx_err);
31739 + atomic_inc_unchecked(&vcc->stats->tx_err);
31740 dev_kfree_skb_any(skb);
31741 return -EINVAL;
31742 }
31743 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31744
31745 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
31746 {
31747 - atomic_inc(&vcc->stats->tx_err);
31748 + atomic_inc_unchecked(&vcc->stats->tx_err);
31749 dev_kfree_skb_any(skb);
31750 return -EIO;
31751 }
31752 - atomic_inc(&vcc->stats->tx);
31753 + atomic_inc_unchecked(&vcc->stats->tx);
31754
31755 return 0;
31756 }
31757 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31758 {
31759 printk("nicstar%d: Can't allocate buffers for aal0.\n",
31760 card->index);
31761 - atomic_add(i,&vcc->stats->rx_drop);
31762 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
31763 break;
31764 }
31765 if (!atm_charge(vcc, sb->truesize))
31766 {
31767 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
31768 card->index);
31769 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
31770 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
31771 dev_kfree_skb_any(sb);
31772 break;
31773 }
31774 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31775 ATM_SKB(sb)->vcc = vcc;
31776 __net_timestamp(sb);
31777 vcc->push(vcc, sb);
31778 - atomic_inc(&vcc->stats->rx);
31779 + atomic_inc_unchecked(&vcc->stats->rx);
31780 cell += ATM_CELL_PAYLOAD;
31781 }
31782
31783 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31784 if (iovb == NULL)
31785 {
31786 printk("nicstar%d: Out of iovec buffers.\n", card->index);
31787 - atomic_inc(&vcc->stats->rx_drop);
31788 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31789 recycle_rx_buf(card, skb);
31790 return;
31791 }
31792 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31793 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
31794 {
31795 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
31796 - atomic_inc(&vcc->stats->rx_err);
31797 + atomic_inc_unchecked(&vcc->stats->rx_err);
31798 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
31799 NS_SKB(iovb)->iovcnt = 0;
31800 iovb->len = 0;
31801 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31802 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
31803 card->index);
31804 which_list(card, skb);
31805 - atomic_inc(&vcc->stats->rx_err);
31806 + atomic_inc_unchecked(&vcc->stats->rx_err);
31807 recycle_rx_buf(card, skb);
31808 vc->rx_iov = NULL;
31809 recycle_iov_buf(card, iovb);
31810 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31811 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
31812 card->index);
31813 which_list(card, skb);
31814 - atomic_inc(&vcc->stats->rx_err);
31815 + atomic_inc_unchecked(&vcc->stats->rx_err);
31816 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
31817 NS_SKB(iovb)->iovcnt);
31818 vc->rx_iov = NULL;
31819 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31820 printk(" - PDU size mismatch.\n");
31821 else
31822 printk(".\n");
31823 - atomic_inc(&vcc->stats->rx_err);
31824 + atomic_inc_unchecked(&vcc->stats->rx_err);
31825 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
31826 NS_SKB(iovb)->iovcnt);
31827 vc->rx_iov = NULL;
31828 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31829 if (!atm_charge(vcc, skb->truesize))
31830 {
31831 push_rxbufs(card, skb);
31832 - atomic_inc(&vcc->stats->rx_drop);
31833 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31834 }
31835 else
31836 {
31837 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31838 ATM_SKB(skb)->vcc = vcc;
31839 __net_timestamp(skb);
31840 vcc->push(vcc, skb);
31841 - atomic_inc(&vcc->stats->rx);
31842 + atomic_inc_unchecked(&vcc->stats->rx);
31843 }
31844 }
31845 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
31846 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31847 if (!atm_charge(vcc, sb->truesize))
31848 {
31849 push_rxbufs(card, sb);
31850 - atomic_inc(&vcc->stats->rx_drop);
31851 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31852 }
31853 else
31854 {
31855 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31856 ATM_SKB(sb)->vcc = vcc;
31857 __net_timestamp(sb);
31858 vcc->push(vcc, sb);
31859 - atomic_inc(&vcc->stats->rx);
31860 + atomic_inc_unchecked(&vcc->stats->rx);
31861 }
31862
31863 push_rxbufs(card, skb);
31864 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31865 if (!atm_charge(vcc, skb->truesize))
31866 {
31867 push_rxbufs(card, skb);
31868 - atomic_inc(&vcc->stats->rx_drop);
31869 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31870 }
31871 else
31872 {
31873 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31874 ATM_SKB(skb)->vcc = vcc;
31875 __net_timestamp(skb);
31876 vcc->push(vcc, skb);
31877 - atomic_inc(&vcc->stats->rx);
31878 + atomic_inc_unchecked(&vcc->stats->rx);
31879 }
31880
31881 push_rxbufs(card, sb);
31882 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31883 if (hb == NULL)
31884 {
31885 printk("nicstar%d: Out of huge buffers.\n", card->index);
31886 - atomic_inc(&vcc->stats->rx_drop);
31887 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31888 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
31889 NS_SKB(iovb)->iovcnt);
31890 vc->rx_iov = NULL;
31891 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31892 }
31893 else
31894 dev_kfree_skb_any(hb);
31895 - atomic_inc(&vcc->stats->rx_drop);
31896 + atomic_inc_unchecked(&vcc->stats->rx_drop);
31897 }
31898 else
31899 {
31900 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31901 #endif /* NS_USE_DESTRUCTORS */
31902 __net_timestamp(hb);
31903 vcc->push(vcc, hb);
31904 - atomic_inc(&vcc->stats->rx);
31905 + atomic_inc_unchecked(&vcc->stats->rx);
31906 }
31907 }
31908
31909 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
31910 index 84c93ff..e6ed269 100644
31911 --- a/drivers/atm/solos-pci.c
31912 +++ b/drivers/atm/solos-pci.c
31913 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
31914 }
31915 atm_charge(vcc, skb->truesize);
31916 vcc->push(vcc, skb);
31917 - atomic_inc(&vcc->stats->rx);
31918 + atomic_inc_unchecked(&vcc->stats->rx);
31919 break;
31920
31921 case PKT_STATUS:
31922 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
31923 char msg[500];
31924 char item[10];
31925
31926 + pax_track_stack();
31927 +
31928 len = buf->len;
31929 for (i = 0; i < len; i++){
31930 if(i % 8 == 0)
31931 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
31932 vcc = SKB_CB(oldskb)->vcc;
31933
31934 if (vcc) {
31935 - atomic_inc(&vcc->stats->tx);
31936 + atomic_inc_unchecked(&vcc->stats->tx);
31937 solos_pop(vcc, oldskb);
31938 } else
31939 dev_kfree_skb_irq(oldskb);
31940 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
31941 index 6dd3f59..ee377f3 100644
31942 --- a/drivers/atm/suni.c
31943 +++ b/drivers/atm/suni.c
31944 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
31945
31946
31947 #define ADD_LIMITED(s,v) \
31948 - atomic_add((v),&stats->s); \
31949 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
31950 + atomic_add_unchecked((v),&stats->s); \
31951 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
31952
31953
31954 static void suni_hz(unsigned long from_timer)
31955 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
31956 index fc8cb07..4a80e53 100644
31957 --- a/drivers/atm/uPD98402.c
31958 +++ b/drivers/atm/uPD98402.c
31959 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
31960 struct sonet_stats tmp;
31961 int error = 0;
31962
31963 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
31964 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
31965 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
31966 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
31967 if (zero && !error) {
31968 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
31969
31970
31971 #define ADD_LIMITED(s,v) \
31972 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
31973 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
31974 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
31975 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
31976 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
31977 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
31978
31979
31980 static void stat_event(struct atm_dev *dev)
31981 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
31982 if (reason & uPD98402_INT_PFM) stat_event(dev);
31983 if (reason & uPD98402_INT_PCO) {
31984 (void) GET(PCOCR); /* clear interrupt cause */
31985 - atomic_add(GET(HECCT),
31986 + atomic_add_unchecked(GET(HECCT),
31987 &PRIV(dev)->sonet_stats.uncorr_hcs);
31988 }
31989 if ((reason & uPD98402_INT_RFO) &&
31990 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
31991 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
31992 uPD98402_INT_LOS),PIMR); /* enable them */
31993 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
31994 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
31995 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
31996 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
31997 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
31998 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
31999 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
32000 return 0;
32001 }
32002
32003 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
32004 index 2e9635b..32927b4 100644
32005 --- a/drivers/atm/zatm.c
32006 +++ b/drivers/atm/zatm.c
32007 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
32008 }
32009 if (!size) {
32010 dev_kfree_skb_irq(skb);
32011 - if (vcc) atomic_inc(&vcc->stats->rx_err);
32012 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
32013 continue;
32014 }
32015 if (!atm_charge(vcc,skb->truesize)) {
32016 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
32017 skb->len = size;
32018 ATM_SKB(skb)->vcc = vcc;
32019 vcc->push(vcc,skb);
32020 - atomic_inc(&vcc->stats->rx);
32021 + atomic_inc_unchecked(&vcc->stats->rx);
32022 }
32023 zout(pos & 0xffff,MTA(mbx));
32024 #if 0 /* probably a stupid idea */
32025 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
32026 skb_queue_head(&zatm_vcc->backlog,skb);
32027 break;
32028 }
32029 - atomic_inc(&vcc->stats->tx);
32030 + atomic_inc_unchecked(&vcc->stats->tx);
32031 wake_up(&zatm_vcc->tx_wait);
32032 }
32033
32034 diff --git a/drivers/base/bus.c b/drivers/base/bus.c
32035 index 63c143e..fece183 100644
32036 --- a/drivers/base/bus.c
32037 +++ b/drivers/base/bus.c
32038 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
32039 return ret;
32040 }
32041
32042 -static struct sysfs_ops driver_sysfs_ops = {
32043 +static const struct sysfs_ops driver_sysfs_ops = {
32044 .show = drv_attr_show,
32045 .store = drv_attr_store,
32046 };
32047 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
32048 return ret;
32049 }
32050
32051 -static struct sysfs_ops bus_sysfs_ops = {
32052 +static const struct sysfs_ops bus_sysfs_ops = {
32053 .show = bus_attr_show,
32054 .store = bus_attr_store,
32055 };
32056 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
32057 return 0;
32058 }
32059
32060 -static struct kset_uevent_ops bus_uevent_ops = {
32061 +static const struct kset_uevent_ops bus_uevent_ops = {
32062 .filter = bus_uevent_filter,
32063 };
32064
32065 diff --git a/drivers/base/class.c b/drivers/base/class.c
32066 index 6e2c3b0..cb61871 100644
32067 --- a/drivers/base/class.c
32068 +++ b/drivers/base/class.c
32069 @@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
32070 kfree(cp);
32071 }
32072
32073 -static struct sysfs_ops class_sysfs_ops = {
32074 +static const struct sysfs_ops class_sysfs_ops = {
32075 .show = class_attr_show,
32076 .store = class_attr_store,
32077 };
32078 diff --git a/drivers/base/core.c b/drivers/base/core.c
32079 index f33d768..a9358d0 100644
32080 --- a/drivers/base/core.c
32081 +++ b/drivers/base/core.c
32082 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
32083 return ret;
32084 }
32085
32086 -static struct sysfs_ops dev_sysfs_ops = {
32087 +static const struct sysfs_ops dev_sysfs_ops = {
32088 .show = dev_attr_show,
32089 .store = dev_attr_store,
32090 };
32091 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
32092 return retval;
32093 }
32094
32095 -static struct kset_uevent_ops device_uevent_ops = {
32096 +static const struct kset_uevent_ops device_uevent_ops = {
32097 .filter = dev_uevent_filter,
32098 .name = dev_uevent_name,
32099 .uevent = dev_uevent,
32100 diff --git a/drivers/base/memory.c b/drivers/base/memory.c
32101 index 989429c..2272b00 100644
32102 --- a/drivers/base/memory.c
32103 +++ b/drivers/base/memory.c
32104 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
32105 return retval;
32106 }
32107
32108 -static struct kset_uevent_ops memory_uevent_ops = {
32109 +static const struct kset_uevent_ops memory_uevent_ops = {
32110 .name = memory_uevent_name,
32111 .uevent = memory_uevent,
32112 };
32113 diff --git a/drivers/base/sys.c b/drivers/base/sys.c
32114 index 3f202f7..61c4a6f 100644
32115 --- a/drivers/base/sys.c
32116 +++ b/drivers/base/sys.c
32117 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
32118 return -EIO;
32119 }
32120
32121 -static struct sysfs_ops sysfs_ops = {
32122 +static const struct sysfs_ops sysfs_ops = {
32123 .show = sysdev_show,
32124 .store = sysdev_store,
32125 };
32126 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
32127 return -EIO;
32128 }
32129
32130 -static struct sysfs_ops sysfs_class_ops = {
32131 +static const struct sysfs_ops sysfs_class_ops = {
32132 .show = sysdev_class_show,
32133 .store = sysdev_class_store,
32134 };
32135 diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
32136 index eb4fa19..1954777 100644
32137 --- a/drivers/block/DAC960.c
32138 +++ b/drivers/block/DAC960.c
32139 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
32140 unsigned long flags;
32141 int Channel, TargetID;
32142
32143 + pax_track_stack();
32144 +
32145 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
32146 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
32147 sizeof(DAC960_SCSI_Inquiry_T) +
32148 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
32149 index 68b90d9..7e2e3f3 100644
32150 --- a/drivers/block/cciss.c
32151 +++ b/drivers/block/cciss.c
32152 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
32153 int err;
32154 u32 cp;
32155
32156 + memset(&arg64, 0, sizeof(arg64));
32157 +
32158 err = 0;
32159 err |=
32160 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
32161 @@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
32162 /* Wait (up to 20 seconds) for a command to complete */
32163
32164 for (i = 20 * HZ; i > 0; i--) {
32165 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
32166 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
32167 if (done == FIFO_EMPTY)
32168 schedule_timeout_uninterruptible(1);
32169 else
32170 @@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
32171 resend_cmd1:
32172
32173 /* Disable interrupt on the board. */
32174 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
32175 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
32176
32177 /* Make sure there is room in the command FIFO */
32178 /* Actually it should be completely empty at this time */
32179 @@ -2884,13 +2886,13 @@ resend_cmd1:
32180 /* tape side of the driver. */
32181 for (i = 200000; i > 0; i--) {
32182 /* if fifo isn't full go */
32183 - if (!(h->access.fifo_full(h)))
32184 + if (!(h->access->fifo_full(h)))
32185 break;
32186 udelay(10);
32187 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
32188 " waiting!\n", h->ctlr);
32189 }
32190 - h->access.submit_command(h, c); /* Send the cmd */
32191 + h->access->submit_command(h, c); /* Send the cmd */
32192 do {
32193 complete = pollcomplete(h->ctlr);
32194
32195 @@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
32196 while (!hlist_empty(&h->reqQ)) {
32197 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
32198 /* can't do anything if fifo is full */
32199 - if ((h->access.fifo_full(h))) {
32200 + if ((h->access->fifo_full(h))) {
32201 printk(KERN_WARNING "cciss: fifo full\n");
32202 break;
32203 }
32204 @@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
32205 h->Qdepth--;
32206
32207 /* Tell the controller execute command */
32208 - h->access.submit_command(h, c);
32209 + h->access->submit_command(h, c);
32210
32211 /* Put job onto the completed Q */
32212 addQ(&h->cmpQ, c);
32213 @@ -3393,17 +3395,17 @@ startio:
32214
32215 static inline unsigned long get_next_completion(ctlr_info_t *h)
32216 {
32217 - return h->access.command_completed(h);
32218 + return h->access->command_completed(h);
32219 }
32220
32221 static inline int interrupt_pending(ctlr_info_t *h)
32222 {
32223 - return h->access.intr_pending(h);
32224 + return h->access->intr_pending(h);
32225 }
32226
32227 static inline long interrupt_not_for_us(ctlr_info_t *h)
32228 {
32229 - return (((h->access.intr_pending(h) == 0) ||
32230 + return (((h->access->intr_pending(h) == 0) ||
32231 (h->interrupts_enabled == 0)));
32232 }
32233
32234 @@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
32235 */
32236 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
32237 c->product_name = products[prod_index].product_name;
32238 - c->access = *(products[prod_index].access);
32239 + c->access = products[prod_index].access;
32240 c->nr_cmds = c->max_commands - 4;
32241 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
32242 (readb(&c->cfgtable->Signature[1]) != 'I') ||
32243 @@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
32244 }
32245
32246 /* make sure the board interrupts are off */
32247 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
32248 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
32249 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
32250 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
32251 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
32252 @@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
32253 cciss_scsi_setup(i);
32254
32255 /* Turn the interrupts on so we can service requests */
32256 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
32257 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
32258
32259 /* Get the firmware version */
32260 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
32261 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
32262 index 04d6bf8..36e712d 100644
32263 --- a/drivers/block/cciss.h
32264 +++ b/drivers/block/cciss.h
32265 @@ -90,7 +90,7 @@ struct ctlr_info
32266 // information about each logical volume
32267 drive_info_struct *drv[CISS_MAX_LUN];
32268
32269 - struct access_method access;
32270 + struct access_method *access;
32271
32272 /* queue and queue Info */
32273 struct hlist_head reqQ;
32274 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
32275 index 6422651..bb1bdef 100644
32276 --- a/drivers/block/cpqarray.c
32277 +++ b/drivers/block/cpqarray.c
32278 @@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
32279 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
32280 goto Enomem4;
32281 }
32282 - hba[i]->access.set_intr_mask(hba[i], 0);
32283 + hba[i]->access->set_intr_mask(hba[i], 0);
32284 if (request_irq(hba[i]->intr, do_ida_intr,
32285 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
32286 {
32287 @@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
32288 add_timer(&hba[i]->timer);
32289
32290 /* Enable IRQ now that spinlock and rate limit timer are set up */
32291 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
32292 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
32293
32294 for(j=0; j<NWD; j++) {
32295 struct gendisk *disk = ida_gendisk[i][j];
32296 @@ -695,7 +695,7 @@ DBGINFO(
32297 for(i=0; i<NR_PRODUCTS; i++) {
32298 if (board_id == products[i].board_id) {
32299 c->product_name = products[i].product_name;
32300 - c->access = *(products[i].access);
32301 + c->access = products[i].access;
32302 break;
32303 }
32304 }
32305 @@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
32306 hba[ctlr]->intr = intr;
32307 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
32308 hba[ctlr]->product_name = products[j].product_name;
32309 - hba[ctlr]->access = *(products[j].access);
32310 + hba[ctlr]->access = products[j].access;
32311 hba[ctlr]->ctlr = ctlr;
32312 hba[ctlr]->board_id = board_id;
32313 hba[ctlr]->pci_dev = NULL; /* not PCI */
32314 @@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
32315 struct scatterlist tmp_sg[SG_MAX];
32316 int i, dir, seg;
32317
32318 + pax_track_stack();
32319 +
32320 if (blk_queue_plugged(q))
32321 goto startio;
32322
32323 @@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
32324
32325 while((c = h->reqQ) != NULL) {
32326 /* Can't do anything if we're busy */
32327 - if (h->access.fifo_full(h) == 0)
32328 + if (h->access->fifo_full(h) == 0)
32329 return;
32330
32331 /* Get the first entry from the request Q */
32332 @@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
32333 h->Qdepth--;
32334
32335 /* Tell the controller to do our bidding */
32336 - h->access.submit_command(h, c);
32337 + h->access->submit_command(h, c);
32338
32339 /* Get onto the completion Q */
32340 addQ(&h->cmpQ, c);
32341 @@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
32342 unsigned long flags;
32343 __u32 a,a1;
32344
32345 - istat = h->access.intr_pending(h);
32346 + istat = h->access->intr_pending(h);
32347 /* Is this interrupt for us? */
32348 if (istat == 0)
32349 return IRQ_NONE;
32350 @@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
32351 */
32352 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
32353 if (istat & FIFO_NOT_EMPTY) {
32354 - while((a = h->access.command_completed(h))) {
32355 + while((a = h->access->command_completed(h))) {
32356 a1 = a; a &= ~3;
32357 if ((c = h->cmpQ) == NULL)
32358 {
32359 @@ -1434,11 +1436,11 @@ static int sendcmd(
32360 /*
32361 * Disable interrupt
32362 */
32363 - info_p->access.set_intr_mask(info_p, 0);
32364 + info_p->access->set_intr_mask(info_p, 0);
32365 /* Make sure there is room in the command FIFO */
32366 /* Actually it should be completely empty at this time. */
32367 for (i = 200000; i > 0; i--) {
32368 - temp = info_p->access.fifo_full(info_p);
32369 + temp = info_p->access->fifo_full(info_p);
32370 if (temp != 0) {
32371 break;
32372 }
32373 @@ -1451,7 +1453,7 @@ DBG(
32374 /*
32375 * Send the cmd
32376 */
32377 - info_p->access.submit_command(info_p, c);
32378 + info_p->access->submit_command(info_p, c);
32379 complete = pollcomplete(ctlr);
32380
32381 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
32382 @@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
32383 * we check the new geometry. Then turn interrupts back on when
32384 * we're done.
32385 */
32386 - host->access.set_intr_mask(host, 0);
32387 + host->access->set_intr_mask(host, 0);
32388 getgeometry(ctlr);
32389 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
32390 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
32391
32392 for(i=0; i<NWD; i++) {
32393 struct gendisk *disk = ida_gendisk[ctlr][i];
32394 @@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
32395 /* Wait (up to 2 seconds) for a command to complete */
32396
32397 for (i = 200000; i > 0; i--) {
32398 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
32399 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
32400 if (done == 0) {
32401 udelay(10); /* a short fixed delay */
32402 } else
32403 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
32404 index be73e9d..7fbf140 100644
32405 --- a/drivers/block/cpqarray.h
32406 +++ b/drivers/block/cpqarray.h
32407 @@ -99,7 +99,7 @@ struct ctlr_info {
32408 drv_info_t drv[NWD];
32409 struct proc_dir_entry *proc;
32410
32411 - struct access_method access;
32412 + struct access_method *access;
32413
32414 cmdlist_t *reqQ;
32415 cmdlist_t *cmpQ;
32416 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
32417 index 8ec2d70..2804b30 100644
32418 --- a/drivers/block/loop.c
32419 +++ b/drivers/block/loop.c
32420 @@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
32421 mm_segment_t old_fs = get_fs();
32422
32423 set_fs(get_ds());
32424 - bw = file->f_op->write(file, buf, len, &pos);
32425 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
32426 set_fs(old_fs);
32427 if (likely(bw == len))
32428 return 0;
32429 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
32430 index 26ada47..083c480 100644
32431 --- a/drivers/block/nbd.c
32432 +++ b/drivers/block/nbd.c
32433 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
32434 struct kvec iov;
32435 sigset_t blocked, oldset;
32436
32437 + pax_track_stack();
32438 +
32439 if (unlikely(!sock)) {
32440 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
32441 lo->disk->disk_name, (send ? "send" : "recv"));
32442 @@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
32443 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
32444 unsigned int cmd, unsigned long arg)
32445 {
32446 + pax_track_stack();
32447 +
32448 switch (cmd) {
32449 case NBD_DISCONNECT: {
32450 struct request sreq;
32451 diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
32452 index a5d585d..d087be3 100644
32453 --- a/drivers/block/pktcdvd.c
32454 +++ b/drivers/block/pktcdvd.c
32455 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
32456 return len;
32457 }
32458
32459 -static struct sysfs_ops kobj_pkt_ops = {
32460 +static const struct sysfs_ops kobj_pkt_ops = {
32461 .show = kobj_pkt_show,
32462 .store = kobj_pkt_store
32463 };
32464 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
32465 index 6aad99e..89cd142 100644
32466 --- a/drivers/char/Kconfig
32467 +++ b/drivers/char/Kconfig
32468 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
32469
32470 config DEVKMEM
32471 bool "/dev/kmem virtual device support"
32472 - default y
32473 + default n
32474 + depends on !GRKERNSEC_KMEM
32475 help
32476 Say Y here if you want to support the /dev/kmem device. The
32477 /dev/kmem device is rarely used, but can be used for certain
32478 @@ -1114,6 +1115,7 @@ config DEVPORT
32479 bool
32480 depends on !M68K
32481 depends on ISA || PCI
32482 + depends on !GRKERNSEC_KMEM
32483 default y
32484
32485 source "drivers/s390/char/Kconfig"
32486 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
32487 index a96f319..a778a5b 100644
32488 --- a/drivers/char/agp/frontend.c
32489 +++ b/drivers/char/agp/frontend.c
32490 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
32491 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
32492 return -EFAULT;
32493
32494 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
32495 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
32496 return -EFAULT;
32497
32498 client = agp_find_client_by_pid(reserve.pid);
32499 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
32500 index d8cff90..9628e70 100644
32501 --- a/drivers/char/briq_panel.c
32502 +++ b/drivers/char/briq_panel.c
32503 @@ -10,6 +10,7 @@
32504 #include <linux/types.h>
32505 #include <linux/errno.h>
32506 #include <linux/tty.h>
32507 +#include <linux/mutex.h>
32508 #include <linux/timer.h>
32509 #include <linux/kernel.h>
32510 #include <linux/wait.h>
32511 @@ -36,6 +37,7 @@ static int vfd_is_open;
32512 static unsigned char vfd[40];
32513 static int vfd_cursor;
32514 static unsigned char ledpb, led;
32515 +static DEFINE_MUTEX(vfd_mutex);
32516
32517 static void update_vfd(void)
32518 {
32519 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
32520 if (!vfd_is_open)
32521 return -EBUSY;
32522
32523 + mutex_lock(&vfd_mutex);
32524 for (;;) {
32525 char c;
32526 if (!indx)
32527 break;
32528 - if (get_user(c, buf))
32529 + if (get_user(c, buf)) {
32530 + mutex_unlock(&vfd_mutex);
32531 return -EFAULT;
32532 + }
32533 if (esc) {
32534 set_led(c);
32535 esc = 0;
32536 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
32537 buf++;
32538 }
32539 update_vfd();
32540 + mutex_unlock(&vfd_mutex);
32541
32542 return len;
32543 }
32544 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
32545 index 31e7c91..161afc0 100644
32546 --- a/drivers/char/genrtc.c
32547 +++ b/drivers/char/genrtc.c
32548 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
32549 switch (cmd) {
32550
32551 case RTC_PLL_GET:
32552 + memset(&pll, 0, sizeof(pll));
32553 if (get_rtc_pll(&pll))
32554 return -EINVAL;
32555 else
32556 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
32557 index 006466d..a2bb21c 100644
32558 --- a/drivers/char/hpet.c
32559 +++ b/drivers/char/hpet.c
32560 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
32561 return 0;
32562 }
32563
32564 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
32565 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
32566
32567 static int
32568 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
32569 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
32570 }
32571
32572 static int
32573 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
32574 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
32575 {
32576 struct hpet_timer __iomem *timer;
32577 struct hpet __iomem *hpet;
32578 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
32579 {
32580 struct hpet_info info;
32581
32582 + memset(&info, 0, sizeof(info));
32583 +
32584 if (devp->hd_ireqfreq)
32585 info.hi_ireqfreq =
32586 hpet_time_div(hpetp, devp->hd_ireqfreq);
32587 - else
32588 - info.hi_ireqfreq = 0;
32589 info.hi_flags =
32590 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
32591 info.hi_hpet = hpetp->hp_which;
32592 diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
32593 index 0afc8b8..6913fc3 100644
32594 --- a/drivers/char/hvc_beat.c
32595 +++ b/drivers/char/hvc_beat.c
32596 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
32597 return cnt;
32598 }
32599
32600 -static struct hv_ops hvc_beat_get_put_ops = {
32601 +static const struct hv_ops hvc_beat_get_put_ops = {
32602 .get_chars = hvc_beat_get_chars,
32603 .put_chars = hvc_beat_put_chars,
32604 };
32605 diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
32606 index 98097f2..407dddc 100644
32607 --- a/drivers/char/hvc_console.c
32608 +++ b/drivers/char/hvc_console.c
32609 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
32610 * console interfaces but can still be used as a tty device. This has to be
32611 * static because kmalloc will not work during early console init.
32612 */
32613 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
32614 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
32615 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
32616 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
32617
32618 @@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
32619 * vty adapters do NOT get an hvc_instantiate() callback since they
32620 * appear after early console init.
32621 */
32622 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
32623 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
32624 {
32625 struct hvc_struct *hp;
32626
32627 @@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
32628 };
32629
32630 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
32631 - struct hv_ops *ops, int outbuf_size)
32632 + const struct hv_ops *ops, int outbuf_size)
32633 {
32634 struct hvc_struct *hp;
32635 int i;
32636 diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
32637 index 10950ca..ed176c3 100644
32638 --- a/drivers/char/hvc_console.h
32639 +++ b/drivers/char/hvc_console.h
32640 @@ -55,7 +55,7 @@ struct hvc_struct {
32641 int outbuf_size;
32642 int n_outbuf;
32643 uint32_t vtermno;
32644 - struct hv_ops *ops;
32645 + const struct hv_ops *ops;
32646 int irq_requested;
32647 int data;
32648 struct winsize ws;
32649 @@ -76,11 +76,11 @@ struct hv_ops {
32650 };
32651
32652 /* Register a vterm and a slot index for use as a console (console_init) */
32653 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
32654 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
32655
32656 /* register a vterm for hvc tty operation (module_init or hotplug add) */
32657 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
32658 - struct hv_ops *ops, int outbuf_size);
32659 + const struct hv_ops *ops, int outbuf_size);
32660 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
32661 extern int hvc_remove(struct hvc_struct *hp);
32662
32663 diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
32664 index 936d05b..fd02426 100644
32665 --- a/drivers/char/hvc_iseries.c
32666 +++ b/drivers/char/hvc_iseries.c
32667 @@ -197,7 +197,7 @@ done:
32668 return sent;
32669 }
32670
32671 -static struct hv_ops hvc_get_put_ops = {
32672 +static const struct hv_ops hvc_get_put_ops = {
32673 .get_chars = get_chars,
32674 .put_chars = put_chars,
32675 .notifier_add = notifier_add_irq,
32676 diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
32677 index b0e168f..69cda2a 100644
32678 --- a/drivers/char/hvc_iucv.c
32679 +++ b/drivers/char/hvc_iucv.c
32680 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
32681
32682
32683 /* HVC operations */
32684 -static struct hv_ops hvc_iucv_ops = {
32685 +static const struct hv_ops hvc_iucv_ops = {
32686 .get_chars = hvc_iucv_get_chars,
32687 .put_chars = hvc_iucv_put_chars,
32688 .notifier_add = hvc_iucv_notifier_add,
32689 diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
32690 index 88590d0..61c4a61 100644
32691 --- a/drivers/char/hvc_rtas.c
32692 +++ b/drivers/char/hvc_rtas.c
32693 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
32694 return i;
32695 }
32696
32697 -static struct hv_ops hvc_rtas_get_put_ops = {
32698 +static const struct hv_ops hvc_rtas_get_put_ops = {
32699 .get_chars = hvc_rtas_read_console,
32700 .put_chars = hvc_rtas_write_console,
32701 };
32702 diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
32703 index bd63ba8..b0957e6 100644
32704 --- a/drivers/char/hvc_udbg.c
32705 +++ b/drivers/char/hvc_udbg.c
32706 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
32707 return i;
32708 }
32709
32710 -static struct hv_ops hvc_udbg_ops = {
32711 +static const struct hv_ops hvc_udbg_ops = {
32712 .get_chars = hvc_udbg_get,
32713 .put_chars = hvc_udbg_put,
32714 };
32715 diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
32716 index 10be343..27370e9 100644
32717 --- a/drivers/char/hvc_vio.c
32718 +++ b/drivers/char/hvc_vio.c
32719 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
32720 return got;
32721 }
32722
32723 -static struct hv_ops hvc_get_put_ops = {
32724 +static const struct hv_ops hvc_get_put_ops = {
32725 .get_chars = filtered_get_chars,
32726 .put_chars = hvc_put_chars,
32727 .notifier_add = notifier_add_irq,
32728 diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
32729 index a6ee32b..94f8c26 100644
32730 --- a/drivers/char/hvc_xen.c
32731 +++ b/drivers/char/hvc_xen.c
32732 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
32733 return recv;
32734 }
32735
32736 -static struct hv_ops hvc_ops = {
32737 +static const struct hv_ops hvc_ops = {
32738 .get_chars = read_console,
32739 .put_chars = write_console,
32740 .notifier_add = notifier_add_irq,
32741 diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
32742 index 266b858..f3ee0bb 100644
32743 --- a/drivers/char/hvcs.c
32744 +++ b/drivers/char/hvcs.c
32745 @@ -82,6 +82,7 @@
32746 #include <asm/hvcserver.h>
32747 #include <asm/uaccess.h>
32748 #include <asm/vio.h>
32749 +#include <asm/local.h>
32750
32751 /*
32752 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32753 @@ -269,7 +270,7 @@ struct hvcs_struct {
32754 unsigned int index;
32755
32756 struct tty_struct *tty;
32757 - int open_count;
32758 + local_t open_count;
32759
32760 /*
32761 * Used to tell the driver kernel_thread what operations need to take
32762 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
32763
32764 spin_lock_irqsave(&hvcsd->lock, flags);
32765
32766 - if (hvcsd->open_count > 0) {
32767 + if (local_read(&hvcsd->open_count) > 0) {
32768 spin_unlock_irqrestore(&hvcsd->lock, flags);
32769 printk(KERN_INFO "HVCS: vterm state unchanged. "
32770 "The hvcs device node is still in use.\n");
32771 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
32772 if ((retval = hvcs_partner_connect(hvcsd)))
32773 goto error_release;
32774
32775 - hvcsd->open_count = 1;
32776 + local_set(&hvcsd->open_count, 1);
32777 hvcsd->tty = tty;
32778 tty->driver_data = hvcsd;
32779
32780 @@ -1169,7 +1170,7 @@ fast_open:
32781
32782 spin_lock_irqsave(&hvcsd->lock, flags);
32783 kref_get(&hvcsd->kref);
32784 - hvcsd->open_count++;
32785 + local_inc(&hvcsd->open_count);
32786 hvcsd->todo_mask |= HVCS_SCHED_READ;
32787 spin_unlock_irqrestore(&hvcsd->lock, flags);
32788
32789 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
32790 hvcsd = tty->driver_data;
32791
32792 spin_lock_irqsave(&hvcsd->lock, flags);
32793 - if (--hvcsd->open_count == 0) {
32794 + if (local_dec_and_test(&hvcsd->open_count)) {
32795
32796 vio_disable_interrupts(hvcsd->vdev);
32797
32798 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
32799 free_irq(irq, hvcsd);
32800 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32801 return;
32802 - } else if (hvcsd->open_count < 0) {
32803 + } else if (local_read(&hvcsd->open_count) < 0) {
32804 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32805 " is missmanaged.\n",
32806 - hvcsd->vdev->unit_address, hvcsd->open_count);
32807 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32808 }
32809
32810 spin_unlock_irqrestore(&hvcsd->lock, flags);
32811 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
32812
32813 spin_lock_irqsave(&hvcsd->lock, flags);
32814 /* Preserve this so that we know how many kref refs to put */
32815 - temp_open_count = hvcsd->open_count;
32816 + temp_open_count = local_read(&hvcsd->open_count);
32817
32818 /*
32819 * Don't kref put inside the spinlock because the destruction
32820 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
32821 hvcsd->tty->driver_data = NULL;
32822 hvcsd->tty = NULL;
32823
32824 - hvcsd->open_count = 0;
32825 + local_set(&hvcsd->open_count, 0);
32826
32827 /* This will drop any buffered data on the floor which is OK in a hangup
32828 * scenario. */
32829 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
32830 * the middle of a write operation? This is a crummy place to do this
32831 * but we want to keep it all in the spinlock.
32832 */
32833 - if (hvcsd->open_count <= 0) {
32834 + if (local_read(&hvcsd->open_count) <= 0) {
32835 spin_unlock_irqrestore(&hvcsd->lock, flags);
32836 return -ENODEV;
32837 }
32838 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
32839 {
32840 struct hvcs_struct *hvcsd = tty->driver_data;
32841
32842 - if (!hvcsd || hvcsd->open_count <= 0)
32843 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32844 return 0;
32845
32846 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32847 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
32848 index ec5e3f8..02455ba 100644
32849 --- a/drivers/char/ipmi/ipmi_msghandler.c
32850 +++ b/drivers/char/ipmi/ipmi_msghandler.c
32851 @@ -414,7 +414,7 @@ struct ipmi_smi {
32852 struct proc_dir_entry *proc_dir;
32853 char proc_dir_name[10];
32854
32855 - atomic_t stats[IPMI_NUM_STATS];
32856 + atomic_unchecked_t stats[IPMI_NUM_STATS];
32857
32858 /*
32859 * run_to_completion duplicate of smb_info, smi_info
32860 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
32861
32862
32863 #define ipmi_inc_stat(intf, stat) \
32864 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
32865 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
32866 #define ipmi_get_stat(intf, stat) \
32867 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
32868 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
32869
32870 static int is_lan_addr(struct ipmi_addr *addr)
32871 {
32872 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
32873 INIT_LIST_HEAD(&intf->cmd_rcvrs);
32874 init_waitqueue_head(&intf->waitq);
32875 for (i = 0; i < IPMI_NUM_STATS; i++)
32876 - atomic_set(&intf->stats[i], 0);
32877 + atomic_set_unchecked(&intf->stats[i], 0);
32878
32879 intf->proc_dir = NULL;
32880
32881 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
32882 struct ipmi_smi_msg smi_msg;
32883 struct ipmi_recv_msg recv_msg;
32884
32885 + pax_track_stack();
32886 +
32887 si = (struct ipmi_system_interface_addr *) &addr;
32888 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
32889 si->channel = IPMI_BMC_CHANNEL;
32890 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
32891 index abae8c9..8021979 100644
32892 --- a/drivers/char/ipmi/ipmi_si_intf.c
32893 +++ b/drivers/char/ipmi/ipmi_si_intf.c
32894 @@ -277,7 +277,7 @@ struct smi_info {
32895 unsigned char slave_addr;
32896
32897 /* Counters and things for the proc filesystem. */
32898 - atomic_t stats[SI_NUM_STATS];
32899 + atomic_unchecked_t stats[SI_NUM_STATS];
32900
32901 struct task_struct *thread;
32902
32903 @@ -285,9 +285,9 @@ struct smi_info {
32904 };
32905
32906 #define smi_inc_stat(smi, stat) \
32907 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
32908 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
32909 #define smi_get_stat(smi, stat) \
32910 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
32911 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
32912
32913 #define SI_MAX_PARMS 4
32914
32915 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
32916 atomic_set(&new_smi->req_events, 0);
32917 new_smi->run_to_completion = 0;
32918 for (i = 0; i < SI_NUM_STATS; i++)
32919 - atomic_set(&new_smi->stats[i], 0);
32920 + atomic_set_unchecked(&new_smi->stats[i], 0);
32921
32922 new_smi->interrupt_disabled = 0;
32923 atomic_set(&new_smi->stop_operation, 0);
32924 diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
32925 index 402838f..55e2200 100644
32926 --- a/drivers/char/istallion.c
32927 +++ b/drivers/char/istallion.c
32928 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
32929 * re-used for each stats call.
32930 */
32931 static comstats_t stli_comstats;
32932 -static combrd_t stli_brdstats;
32933 static struct asystats stli_cdkstats;
32934
32935 /*****************************************************************************/
32936 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
32937 {
32938 struct stlibrd *brdp;
32939 unsigned int i;
32940 + combrd_t stli_brdstats;
32941
32942 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
32943 return -EFAULT;
32944 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
32945 struct stliport stli_dummyport;
32946 struct stliport *portp;
32947
32948 + pax_track_stack();
32949 +
32950 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
32951 return -EFAULT;
32952 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
32953 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
32954 struct stlibrd stli_dummybrd;
32955 struct stlibrd *brdp;
32956
32957 + pax_track_stack();
32958 +
32959 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
32960 return -EFAULT;
32961 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
32962 diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
32963 index 950837c..e55a288 100644
32964 --- a/drivers/char/keyboard.c
32965 +++ b/drivers/char/keyboard.c
32966 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
32967 kbd->kbdmode == VC_MEDIUMRAW) &&
32968 value != KVAL(K_SAK))
32969 return; /* SAK is allowed even in raw mode */
32970 +
32971 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
32972 + {
32973 + void *func = fn_handler[value];
32974 + if (func == fn_show_state || func == fn_show_ptregs ||
32975 + func == fn_show_mem)
32976 + return;
32977 + }
32978 +#endif
32979 +
32980 fn_handler[value](vc);
32981 }
32982
32983 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
32984 .evbit = { BIT_MASK(EV_SND) },
32985 },
32986
32987 - { }, /* Terminating entry */
32988 + { 0 }, /* Terminating entry */
32989 };
32990
32991 MODULE_DEVICE_TABLE(input, kbd_ids);
32992 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
32993 index 87c67b4..230527a 100644
32994 --- a/drivers/char/mbcs.c
32995 +++ b/drivers/char/mbcs.c
32996 @@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
32997 return 0;
32998 }
32999
33000 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
33001 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
33002 {
33003 .part_num = MBCS_PART_NUM,
33004 .mfg_num = MBCS_MFG_NUM,
33005 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
33006 index 1270f64..8495f49 100644
33007 --- a/drivers/char/mem.c
33008 +++ b/drivers/char/mem.c
33009 @@ -18,6 +18,7 @@
33010 #include <linux/raw.h>
33011 #include <linux/tty.h>
33012 #include <linux/capability.h>
33013 +#include <linux/security.h>
33014 #include <linux/ptrace.h>
33015 #include <linux/device.h>
33016 #include <linux/highmem.h>
33017 @@ -35,6 +36,10 @@
33018 # include <linux/efi.h>
33019 #endif
33020
33021 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33022 +extern struct file_operations grsec_fops;
33023 +#endif
33024 +
33025 static inline unsigned long size_inside_page(unsigned long start,
33026 unsigned long size)
33027 {
33028 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33029
33030 while (cursor < to) {
33031 if (!devmem_is_allowed(pfn)) {
33032 +#ifdef CONFIG_GRKERNSEC_KMEM
33033 + gr_handle_mem_readwrite(from, to);
33034 +#else
33035 printk(KERN_INFO
33036 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
33037 current->comm, from, to);
33038 +#endif
33039 return 0;
33040 }
33041 cursor += PAGE_SIZE;
33042 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33043 }
33044 return 1;
33045 }
33046 +#elif defined(CONFIG_GRKERNSEC_KMEM)
33047 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33048 +{
33049 + return 0;
33050 +}
33051 #else
33052 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33053 {
33054 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
33055 #endif
33056
33057 while (count > 0) {
33058 + char *temp;
33059 +
33060 /*
33061 * Handle first page in case it's not aligned
33062 */
33063 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
33064 if (!ptr)
33065 return -EFAULT;
33066
33067 - if (copy_to_user(buf, ptr, sz)) {
33068 +#ifdef CONFIG_PAX_USERCOPY
33069 + temp = kmalloc(sz, GFP_KERNEL);
33070 + if (!temp) {
33071 + unxlate_dev_mem_ptr(p, ptr);
33072 + return -ENOMEM;
33073 + }
33074 + memcpy(temp, ptr, sz);
33075 +#else
33076 + temp = ptr;
33077 +#endif
33078 +
33079 + if (copy_to_user(buf, temp, sz)) {
33080 +
33081 +#ifdef CONFIG_PAX_USERCOPY
33082 + kfree(temp);
33083 +#endif
33084 +
33085 unxlate_dev_mem_ptr(p, ptr);
33086 return -EFAULT;
33087 }
33088
33089 +#ifdef CONFIG_PAX_USERCOPY
33090 + kfree(temp);
33091 +#endif
33092 +
33093 unxlate_dev_mem_ptr(p, ptr);
33094
33095 buf += sz;
33096 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33097 size_t count, loff_t *ppos)
33098 {
33099 unsigned long p = *ppos;
33100 - ssize_t low_count, read, sz;
33101 + ssize_t low_count, read, sz, err = 0;
33102 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
33103 - int err = 0;
33104
33105 read = 0;
33106 if (p < (unsigned long) high_memory) {
33107 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33108 }
33109 #endif
33110 while (low_count > 0) {
33111 + char *temp;
33112 +
33113 sz = size_inside_page(p, low_count);
33114
33115 /*
33116 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33117 */
33118 kbuf = xlate_dev_kmem_ptr((char *)p);
33119
33120 - if (copy_to_user(buf, kbuf, sz))
33121 +#ifdef CONFIG_PAX_USERCOPY
33122 + temp = kmalloc(sz, GFP_KERNEL);
33123 + if (!temp)
33124 + return -ENOMEM;
33125 + memcpy(temp, kbuf, sz);
33126 +#else
33127 + temp = kbuf;
33128 +#endif
33129 +
33130 + err = copy_to_user(buf, temp, sz);
33131 +
33132 +#ifdef CONFIG_PAX_USERCOPY
33133 + kfree(temp);
33134 +#endif
33135 +
33136 + if (err)
33137 return -EFAULT;
33138 buf += sz;
33139 p += sz;
33140 @@ -889,6 +941,9 @@ static const struct memdev {
33141 #ifdef CONFIG_CRASH_DUMP
33142 [12] = { "oldmem", 0, &oldmem_fops, NULL },
33143 #endif
33144 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33145 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
33146 +#endif
33147 };
33148
33149 static int memory_open(struct inode *inode, struct file *filp)
33150 diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
33151 index 918711a..4ffaf5e 100644
33152 --- a/drivers/char/mmtimer.c
33153 +++ b/drivers/char/mmtimer.c
33154 @@ -756,7 +756,7 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
33155 return err;
33156 }
33157
33158 -static struct k_clock sgi_clock = {
33159 +static k_clock_no_const sgi_clock = {
33160 .res = 0,
33161 .clock_set = sgi_clock_set,
33162 .clock_get = sgi_clock_get,
33163 diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
33164 index 674b3ab..a8d1970 100644
33165 --- a/drivers/char/pcmcia/ipwireless/tty.c
33166 +++ b/drivers/char/pcmcia/ipwireless/tty.c
33167 @@ -29,6 +29,7 @@
33168 #include <linux/tty_driver.h>
33169 #include <linux/tty_flip.h>
33170 #include <linux/uaccess.h>
33171 +#include <asm/local.h>
33172
33173 #include "tty.h"
33174 #include "network.h"
33175 @@ -51,7 +52,7 @@ struct ipw_tty {
33176 int tty_type;
33177 struct ipw_network *network;
33178 struct tty_struct *linux_tty;
33179 - int open_count;
33180 + local_t open_count;
33181 unsigned int control_lines;
33182 struct mutex ipw_tty_mutex;
33183 int tx_bytes_queued;
33184 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
33185 mutex_unlock(&tty->ipw_tty_mutex);
33186 return -ENODEV;
33187 }
33188 - if (tty->open_count == 0)
33189 + if (local_read(&tty->open_count) == 0)
33190 tty->tx_bytes_queued = 0;
33191
33192 - tty->open_count++;
33193 + local_inc(&tty->open_count);
33194
33195 tty->linux_tty = linux_tty;
33196 linux_tty->driver_data = tty;
33197 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
33198
33199 static void do_ipw_close(struct ipw_tty *tty)
33200 {
33201 - tty->open_count--;
33202 -
33203 - if (tty->open_count == 0) {
33204 + if (local_dec_return(&tty->open_count) == 0) {
33205 struct tty_struct *linux_tty = tty->linux_tty;
33206
33207 if (linux_tty != NULL) {
33208 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
33209 return;
33210
33211 mutex_lock(&tty->ipw_tty_mutex);
33212 - if (tty->open_count == 0) {
33213 + if (local_read(&tty->open_count) == 0) {
33214 mutex_unlock(&tty->ipw_tty_mutex);
33215 return;
33216 }
33217 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
33218 return;
33219 }
33220
33221 - if (!tty->open_count) {
33222 + if (!local_read(&tty->open_count)) {
33223 mutex_unlock(&tty->ipw_tty_mutex);
33224 return;
33225 }
33226 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
33227 return -ENODEV;
33228
33229 mutex_lock(&tty->ipw_tty_mutex);
33230 - if (!tty->open_count) {
33231 + if (!local_read(&tty->open_count)) {
33232 mutex_unlock(&tty->ipw_tty_mutex);
33233 return -EINVAL;
33234 }
33235 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
33236 if (!tty)
33237 return -ENODEV;
33238
33239 - if (!tty->open_count)
33240 + if (!local_read(&tty->open_count))
33241 return -EINVAL;
33242
33243 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
33244 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
33245 if (!tty)
33246 return 0;
33247
33248 - if (!tty->open_count)
33249 + if (!local_read(&tty->open_count))
33250 return 0;
33251
33252 return tty->tx_bytes_queued;
33253 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
33254 if (!tty)
33255 return -ENODEV;
33256
33257 - if (!tty->open_count)
33258 + if (!local_read(&tty->open_count))
33259 return -EINVAL;
33260
33261 return get_control_lines(tty);
33262 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
33263 if (!tty)
33264 return -ENODEV;
33265
33266 - if (!tty->open_count)
33267 + if (!local_read(&tty->open_count))
33268 return -EINVAL;
33269
33270 return set_control_lines(tty, set, clear);
33271 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
33272 if (!tty)
33273 return -ENODEV;
33274
33275 - if (!tty->open_count)
33276 + if (!local_read(&tty->open_count))
33277 return -EINVAL;
33278
33279 /* FIXME: Exactly how is the tty object locked here .. */
33280 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
33281 against a parallel ioctl etc */
33282 mutex_lock(&ttyj->ipw_tty_mutex);
33283 }
33284 - while (ttyj->open_count)
33285 + while (local_read(&ttyj->open_count))
33286 do_ipw_close(ttyj);
33287 ipwireless_disassociate_network_ttys(network,
33288 ttyj->channel_idx);
33289 diff --git a/drivers/char/pty.c b/drivers/char/pty.c
33290 index 62f282e..e45c45c 100644
33291 --- a/drivers/char/pty.c
33292 +++ b/drivers/char/pty.c
33293 @@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
33294 register_sysctl_table(pty_root_table);
33295
33296 /* Now create the /dev/ptmx special device */
33297 + pax_open_kernel();
33298 tty_default_fops(&ptmx_fops);
33299 - ptmx_fops.open = ptmx_open;
33300 + *(void **)&ptmx_fops.open = ptmx_open;
33301 + pax_close_kernel();
33302
33303 cdev_init(&ptmx_cdev, &ptmx_fops);
33304 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
33305 diff --git a/drivers/char/random.c b/drivers/char/random.c
33306 index 3a19e2d..6ed09d3 100644
33307 --- a/drivers/char/random.c
33308 +++ b/drivers/char/random.c
33309 @@ -254,8 +254,13 @@
33310 /*
33311 * Configuration information
33312 */
33313 +#ifdef CONFIG_GRKERNSEC_RANDNET
33314 +#define INPUT_POOL_WORDS 512
33315 +#define OUTPUT_POOL_WORDS 128
33316 +#else
33317 #define INPUT_POOL_WORDS 128
33318 #define OUTPUT_POOL_WORDS 32
33319 +#endif
33320 #define SEC_XFER_SIZE 512
33321
33322 /*
33323 @@ -292,10 +297,17 @@ static struct poolinfo {
33324 int poolwords;
33325 int tap1, tap2, tap3, tap4, tap5;
33326 } poolinfo_table[] = {
33327 +#ifdef CONFIG_GRKERNSEC_RANDNET
33328 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
33329 + { 512, 411, 308, 208, 104, 1 },
33330 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
33331 + { 128, 103, 76, 51, 25, 1 },
33332 +#else
33333 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
33334 { 128, 103, 76, 51, 25, 1 },
33335 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
33336 { 32, 26, 20, 14, 7, 1 },
33337 +#endif
33338 #if 0
33339 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
33340 { 2048, 1638, 1231, 819, 411, 1 },
33341 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
33342 #include <linux/sysctl.h>
33343
33344 static int min_read_thresh = 8, min_write_thresh;
33345 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
33346 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
33347 static int max_write_thresh = INPUT_POOL_WORDS * 32;
33348 static char sysctl_bootid[16];
33349
33350 diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
33351 index 0e29a23..0efc2c2 100644
33352 --- a/drivers/char/rocket.c
33353 +++ b/drivers/char/rocket.c
33354 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
33355 struct rocket_ports tmp;
33356 int board;
33357
33358 + pax_track_stack();
33359 +
33360 if (!retports)
33361 return -EFAULT;
33362 memset(&tmp, 0, sizeof (tmp));
33363 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
33364 index 8c262aa..4d3b058 100644
33365 --- a/drivers/char/sonypi.c
33366 +++ b/drivers/char/sonypi.c
33367 @@ -55,6 +55,7 @@
33368 #include <asm/uaccess.h>
33369 #include <asm/io.h>
33370 #include <asm/system.h>
33371 +#include <asm/local.h>
33372
33373 #include <linux/sonypi.h>
33374
33375 @@ -491,7 +492,7 @@ static struct sonypi_device {
33376 spinlock_t fifo_lock;
33377 wait_queue_head_t fifo_proc_list;
33378 struct fasync_struct *fifo_async;
33379 - int open_count;
33380 + local_t open_count;
33381 int model;
33382 struct input_dev *input_jog_dev;
33383 struct input_dev *input_key_dev;
33384 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
33385 static int sonypi_misc_release(struct inode *inode, struct file *file)
33386 {
33387 mutex_lock(&sonypi_device.lock);
33388 - sonypi_device.open_count--;
33389 + local_dec(&sonypi_device.open_count);
33390 mutex_unlock(&sonypi_device.lock);
33391 return 0;
33392 }
33393 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
33394 lock_kernel();
33395 mutex_lock(&sonypi_device.lock);
33396 /* Flush input queue on first open */
33397 - if (!sonypi_device.open_count)
33398 + if (!local_read(&sonypi_device.open_count))
33399 kfifo_reset(sonypi_device.fifo);
33400 - sonypi_device.open_count++;
33401 + local_inc(&sonypi_device.open_count);
33402 mutex_unlock(&sonypi_device.lock);
33403 unlock_kernel();
33404 return 0;
33405 diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
33406 index db6dcfa..13834cb 100644
33407 --- a/drivers/char/stallion.c
33408 +++ b/drivers/char/stallion.c
33409 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
33410 struct stlport stl_dummyport;
33411 struct stlport *portp;
33412
33413 + pax_track_stack();
33414 +
33415 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
33416 return -EFAULT;
33417 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
33418 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
33419 index a0789f6..cea3902 100644
33420 --- a/drivers/char/tpm/tpm.c
33421 +++ b/drivers/char/tpm/tpm.c
33422 @@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
33423 chip->vendor.req_complete_val)
33424 goto out_recv;
33425
33426 - if ((status == chip->vendor.req_canceled)) {
33427 + if (status == chip->vendor.req_canceled) {
33428 dev_err(chip->dev, "Operation Canceled\n");
33429 rc = -ECANCELED;
33430 goto out;
33431 @@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
33432
33433 struct tpm_chip *chip = dev_get_drvdata(dev);
33434
33435 + pax_track_stack();
33436 +
33437 tpm_cmd.header.in = tpm_readpubek_header;
33438 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
33439 "attempting to read the PUBEK");
33440 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
33441 index bf2170f..ce8cab9 100644
33442 --- a/drivers/char/tpm/tpm_bios.c
33443 +++ b/drivers/char/tpm/tpm_bios.c
33444 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
33445 event = addr;
33446
33447 if ((event->event_type == 0 && event->event_size == 0) ||
33448 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
33449 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
33450 return NULL;
33451
33452 return addr;
33453 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
33454 return NULL;
33455
33456 if ((event->event_type == 0 && event->event_size == 0) ||
33457 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
33458 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
33459 return NULL;
33460
33461 (*pos)++;
33462 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
33463 int i;
33464
33465 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
33466 - seq_putc(m, data[i]);
33467 + if (!seq_putc(m, data[i]))
33468 + return -EFAULT;
33469
33470 return 0;
33471 }
33472 @@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
33473 log->bios_event_log_end = log->bios_event_log + len;
33474
33475 virt = acpi_os_map_memory(start, len);
33476 + if (!virt) {
33477 + kfree(log->bios_event_log);
33478 + log->bios_event_log = NULL;
33479 + return -EFAULT;
33480 + }
33481
33482 - memcpy(log->bios_event_log, virt, len);
33483 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
33484
33485 acpi_os_unmap_memory(virt, len);
33486 return 0;
33487 diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
33488 index 123cedf..6664cb4 100644
33489 --- a/drivers/char/tty_io.c
33490 +++ b/drivers/char/tty_io.c
33491 @@ -146,7 +146,7 @@ static int tty_open(struct inode *, struct file *);
33492 static int tty_release(struct inode *, struct file *);
33493 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
33494 #ifdef CONFIG_COMPAT
33495 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
33496 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
33497 unsigned long arg);
33498 #else
33499 #define tty_compat_ioctl NULL
33500 @@ -1774,6 +1774,7 @@ got_driver:
33501
33502 if (IS_ERR(tty)) {
33503 mutex_unlock(&tty_mutex);
33504 + tty_driver_kref_put(driver);
33505 return PTR_ERR(tty);
33506 }
33507 }
33508 @@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33509 return retval;
33510 }
33511
33512 +EXPORT_SYMBOL(tty_ioctl);
33513 +
33514 #ifdef CONFIG_COMPAT
33515 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
33516 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
33517 unsigned long arg)
33518 {
33519 struct inode *inode = file->f_dentry->d_inode;
33520 @@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
33521
33522 return retval;
33523 }
33524 +
33525 +EXPORT_SYMBOL(tty_compat_ioctl);
33526 #endif
33527
33528 /*
33529 @@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33530
33531 void tty_default_fops(struct file_operations *fops)
33532 {
33533 - *fops = tty_fops;
33534 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33535 }
33536
33537 /*
33538 diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
33539 index d814a3d..b55b9c9 100644
33540 --- a/drivers/char/tty_ldisc.c
33541 +++ b/drivers/char/tty_ldisc.c
33542 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
33543 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33544 struct tty_ldisc_ops *ldo = ld->ops;
33545
33546 - ldo->refcount--;
33547 + atomic_dec(&ldo->refcount);
33548 module_put(ldo->owner);
33549 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33550
33551 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
33552 spin_lock_irqsave(&tty_ldisc_lock, flags);
33553 tty_ldiscs[disc] = new_ldisc;
33554 new_ldisc->num = disc;
33555 - new_ldisc->refcount = 0;
33556 + atomic_set(&new_ldisc->refcount, 0);
33557 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33558
33559 return ret;
33560 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33561 return -EINVAL;
33562
33563 spin_lock_irqsave(&tty_ldisc_lock, flags);
33564 - if (tty_ldiscs[disc]->refcount)
33565 + if (atomic_read(&tty_ldiscs[disc]->refcount))
33566 ret = -EBUSY;
33567 else
33568 tty_ldiscs[disc] = NULL;
33569 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
33570 if (ldops) {
33571 ret = ERR_PTR(-EAGAIN);
33572 if (try_module_get(ldops->owner)) {
33573 - ldops->refcount++;
33574 + atomic_inc(&ldops->refcount);
33575 ret = ldops;
33576 }
33577 }
33578 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
33579 unsigned long flags;
33580
33581 spin_lock_irqsave(&tty_ldisc_lock, flags);
33582 - ldops->refcount--;
33583 + atomic_dec(&ldops->refcount);
33584 module_put(ldops->owner);
33585 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33586 }
33587 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
33588 index a035ae3..c27fe2c 100644
33589 --- a/drivers/char/virtio_console.c
33590 +++ b/drivers/char/virtio_console.c
33591 @@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
33592 * virtqueue, so we let the drivers do some boutique early-output thing. */
33593 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
33594 {
33595 - virtio_cons.put_chars = put_chars;
33596 + pax_open_kernel();
33597 + *(void **)&virtio_cons.put_chars = put_chars;
33598 + pax_close_kernel();
33599 return hvc_instantiate(0, 0, &virtio_cons);
33600 }
33601
33602 @@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
33603 out_vq = vqs[1];
33604
33605 /* Start using the new console output. */
33606 - virtio_cons.get_chars = get_chars;
33607 - virtio_cons.put_chars = put_chars;
33608 - virtio_cons.notifier_add = notifier_add_vio;
33609 - virtio_cons.notifier_del = notifier_del_vio;
33610 - virtio_cons.notifier_hangup = notifier_del_vio;
33611 + pax_open_kernel();
33612 + *(void **)&virtio_cons.get_chars = get_chars;
33613 + *(void **)&virtio_cons.put_chars = put_chars;
33614 + *(void **)&virtio_cons.notifier_add = notifier_add_vio;
33615 + *(void **)&virtio_cons.notifier_del = notifier_del_vio;
33616 + *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
33617 + pax_close_kernel();
33618
33619 /* The first argument of hvc_alloc() is the virtual console number, so
33620 * we use zero. The second argument is the parameter for the
33621 diff --git a/drivers/char/vt.c b/drivers/char/vt.c
33622 index 0c80c68..53d59c1 100644
33623 --- a/drivers/char/vt.c
33624 +++ b/drivers/char/vt.c
33625 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
33626
33627 static void notify_write(struct vc_data *vc, unsigned int unicode)
33628 {
33629 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33630 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
33631 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33632 }
33633
33634 diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
33635 index 6351a26..999af95 100644
33636 --- a/drivers/char/vt_ioctl.c
33637 +++ b/drivers/char/vt_ioctl.c
33638 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
33639 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33640 return -EFAULT;
33641
33642 - if (!capable(CAP_SYS_TTY_CONFIG))
33643 - perm = 0;
33644 -
33645 switch (cmd) {
33646 case KDGKBENT:
33647 key_map = key_maps[s];
33648 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
33649 val = (i ? K_HOLE : K_NOSUCHMAP);
33650 return put_user(val, &user_kbe->kb_value);
33651 case KDSKBENT:
33652 + if (!capable(CAP_SYS_TTY_CONFIG))
33653 + perm = 0;
33654 +
33655 if (!perm)
33656 return -EPERM;
33657 +
33658 if (!i && v == K_NOSUCHMAP) {
33659 /* deallocate map */
33660 key_map = key_maps[s];
33661 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
33662 int i, j, k;
33663 int ret;
33664
33665 - if (!capable(CAP_SYS_TTY_CONFIG))
33666 - perm = 0;
33667 -
33668 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33669 if (!kbs) {
33670 ret = -ENOMEM;
33671 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
33672 kfree(kbs);
33673 return ((p && *p) ? -EOVERFLOW : 0);
33674 case KDSKBSENT:
33675 + if (!capable(CAP_SYS_TTY_CONFIG))
33676 + perm = 0;
33677 +
33678 if (!perm) {
33679 ret = -EPERM;
33680 goto reterr;
33681 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
33682 index c7ae026..1769c1d 100644
33683 --- a/drivers/cpufreq/cpufreq.c
33684 +++ b/drivers/cpufreq/cpufreq.c
33685 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
33686 complete(&policy->kobj_unregister);
33687 }
33688
33689 -static struct sysfs_ops sysfs_ops = {
33690 +static const struct sysfs_ops sysfs_ops = {
33691 .show = show,
33692 .store = store,
33693 };
33694 diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
33695 index 97b0038..2056670 100644
33696 --- a/drivers/cpuidle/sysfs.c
33697 +++ b/drivers/cpuidle/sysfs.c
33698 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
33699 return ret;
33700 }
33701
33702 -static struct sysfs_ops cpuidle_sysfs_ops = {
33703 +static const struct sysfs_ops cpuidle_sysfs_ops = {
33704 .show = cpuidle_show,
33705 .store = cpuidle_store,
33706 };
33707 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
33708 return ret;
33709 }
33710
33711 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
33712 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
33713 .show = cpuidle_state_show,
33714 };
33715
33716 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
33717 .release = cpuidle_state_sysfs_release,
33718 };
33719
33720 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
33721 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
33722 {
33723 kobject_put(&device->kobjs[i]->kobj);
33724 wait_for_completion(&device->kobjs[i]->kobj_unregister);
33725 diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
33726 index 5f753fc..0377ae9 100644
33727 --- a/drivers/crypto/hifn_795x.c
33728 +++ b/drivers/crypto/hifn_795x.c
33729 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
33730 0xCA, 0x34, 0x2B, 0x2E};
33731 struct scatterlist sg;
33732
33733 + pax_track_stack();
33734 +
33735 memset(src, 0, sizeof(src));
33736 memset(ctx.key, 0, sizeof(ctx.key));
33737
33738 diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
33739 index 71e6482..de8d96c 100644
33740 --- a/drivers/crypto/padlock-aes.c
33741 +++ b/drivers/crypto/padlock-aes.c
33742 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
33743 struct crypto_aes_ctx gen_aes;
33744 int cpu;
33745
33746 + pax_track_stack();
33747 +
33748 if (key_len % 8) {
33749 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
33750 return -EINVAL;
33751 diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
33752 index dcc4ab7..cc834bb 100644
33753 --- a/drivers/dma/ioat/dma.c
33754 +++ b/drivers/dma/ioat/dma.c
33755 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
33756 return entry->show(&chan->common, page);
33757 }
33758
33759 -struct sysfs_ops ioat_sysfs_ops = {
33760 +const struct sysfs_ops ioat_sysfs_ops = {
33761 .show = ioat_attr_show,
33762 };
33763
33764 diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
33765 index bbc3e78..f2db62c 100644
33766 --- a/drivers/dma/ioat/dma.h
33767 +++ b/drivers/dma/ioat/dma.h
33768 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
33769 unsigned long *phys_complete);
33770 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
33771 void ioat_kobject_del(struct ioatdma_device *device);
33772 -extern struct sysfs_ops ioat_sysfs_ops;
33773 +extern const struct sysfs_ops ioat_sysfs_ops;
33774 extern struct ioat_sysfs_entry ioat_version_attr;
33775 extern struct ioat_sysfs_entry ioat_cap_attr;
33776 #endif /* IOATDMA_H */
33777 diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
33778 index 9908c9e..3ceb0e5 100644
33779 --- a/drivers/dma/ioat/dma_v3.c
33780 +++ b/drivers/dma/ioat/dma_v3.c
33781 @@ -71,10 +71,10 @@
33782 /* provide a lookup table for setting the source address in the base or
33783 * extended descriptor of an xor or pq descriptor
33784 */
33785 -static const u8 xor_idx_to_desc __read_mostly = 0xd0;
33786 -static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
33787 -static const u8 pq_idx_to_desc __read_mostly = 0xf8;
33788 -static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
33789 +static const u8 xor_idx_to_desc = 0xd0;
33790 +static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
33791 +static const u8 pq_idx_to_desc = 0xf8;
33792 +static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
33793
33794 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
33795 {
33796 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
33797 index 85c464a..afd1e73 100644
33798 --- a/drivers/edac/amd64_edac.c
33799 +++ b/drivers/edac/amd64_edac.c
33800 @@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
33801 * PCI core identifies what devices are on a system during boot, and then
33802 * inquiry this table to see if this driver is for a given device found.
33803 */
33804 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
33805 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
33806 {
33807 .vendor = PCI_VENDOR_ID_AMD,
33808 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
33809 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
33810 index 2b95f1a..4f52793 100644
33811 --- a/drivers/edac/amd76x_edac.c
33812 +++ b/drivers/edac/amd76x_edac.c
33813 @@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
33814 edac_mc_free(mci);
33815 }
33816
33817 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
33818 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
33819 {
33820 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33821 AMD762},
33822 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
33823 index d205d49..74c9672 100644
33824 --- a/drivers/edac/e752x_edac.c
33825 +++ b/drivers/edac/e752x_edac.c
33826 @@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
33827 edac_mc_free(mci);
33828 }
33829
33830 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
33831 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
33832 {
33833 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33834 E7520},
33835 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
33836 index c7d11cc..c59c1ca 100644
33837 --- a/drivers/edac/e7xxx_edac.c
33838 +++ b/drivers/edac/e7xxx_edac.c
33839 @@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
33840 edac_mc_free(mci);
33841 }
33842
33843 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
33844 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
33845 {
33846 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33847 E7205},
33848 diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
33849 index 5376457..5fdedbc 100644
33850 --- a/drivers/edac/edac_device_sysfs.c
33851 +++ b/drivers/edac/edac_device_sysfs.c
33852 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
33853 }
33854
33855 /* edac_dev file operations for an 'ctl_info' */
33856 -static struct sysfs_ops device_ctl_info_ops = {
33857 +static const struct sysfs_ops device_ctl_info_ops = {
33858 .show = edac_dev_ctl_info_show,
33859 .store = edac_dev_ctl_info_store
33860 };
33861 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
33862 }
33863
33864 /* edac_dev file operations for an 'instance' */
33865 -static struct sysfs_ops device_instance_ops = {
33866 +static const struct sysfs_ops device_instance_ops = {
33867 .show = edac_dev_instance_show,
33868 .store = edac_dev_instance_store
33869 };
33870 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
33871 }
33872
33873 /* edac_dev file operations for a 'block' */
33874 -static struct sysfs_ops device_block_ops = {
33875 +static const struct sysfs_ops device_block_ops = {
33876 .show = edac_dev_block_show,
33877 .store = edac_dev_block_store
33878 };
33879 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
33880 index e1d4ce0..88840e9 100644
33881 --- a/drivers/edac/edac_mc_sysfs.c
33882 +++ b/drivers/edac/edac_mc_sysfs.c
33883 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
33884 return -EIO;
33885 }
33886
33887 -static struct sysfs_ops csrowfs_ops = {
33888 +static const struct sysfs_ops csrowfs_ops = {
33889 .show = csrowdev_show,
33890 .store = csrowdev_store
33891 };
33892 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
33893 }
33894
33895 /* Intermediate show/store table */
33896 -static struct sysfs_ops mci_ops = {
33897 +static const struct sysfs_ops mci_ops = {
33898 .show = mcidev_show,
33899 .store = mcidev_store
33900 };
33901 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
33902 index 422728c..d8d9c88 100644
33903 --- a/drivers/edac/edac_pci_sysfs.c
33904 +++ b/drivers/edac/edac_pci_sysfs.c
33905 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
33906 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
33907 static int edac_pci_poll_msec = 1000; /* one second workq period */
33908
33909 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
33910 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
33911 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
33912 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
33913
33914 static struct kobject *edac_pci_top_main_kobj;
33915 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
33916 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
33917 }
33918
33919 /* fs_ops table */
33920 -static struct sysfs_ops pci_instance_ops = {
33921 +static const struct sysfs_ops pci_instance_ops = {
33922 .show = edac_pci_instance_show,
33923 .store = edac_pci_instance_store
33924 };
33925 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
33926 return -EIO;
33927 }
33928
33929 -static struct sysfs_ops edac_pci_sysfs_ops = {
33930 +static const struct sysfs_ops edac_pci_sysfs_ops = {
33931 .show = edac_pci_dev_show,
33932 .store = edac_pci_dev_store
33933 };
33934 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33935 edac_printk(KERN_CRIT, EDAC_PCI,
33936 "Signaled System Error on %s\n",
33937 pci_name(dev));
33938 - atomic_inc(&pci_nonparity_count);
33939 + atomic_inc_unchecked(&pci_nonparity_count);
33940 }
33941
33942 if (status & (PCI_STATUS_PARITY)) {
33943 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33944 "Master Data Parity Error on %s\n",
33945 pci_name(dev));
33946
33947 - atomic_inc(&pci_parity_count);
33948 + atomic_inc_unchecked(&pci_parity_count);
33949 }
33950
33951 if (status & (PCI_STATUS_DETECTED_PARITY)) {
33952 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33953 "Detected Parity Error on %s\n",
33954 pci_name(dev));
33955
33956 - atomic_inc(&pci_parity_count);
33957 + atomic_inc_unchecked(&pci_parity_count);
33958 }
33959 }
33960
33961 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33962 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
33963 "Signaled System Error on %s\n",
33964 pci_name(dev));
33965 - atomic_inc(&pci_nonparity_count);
33966 + atomic_inc_unchecked(&pci_nonparity_count);
33967 }
33968
33969 if (status & (PCI_STATUS_PARITY)) {
33970 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33971 "Master Data Parity Error on "
33972 "%s\n", pci_name(dev));
33973
33974 - atomic_inc(&pci_parity_count);
33975 + atomic_inc_unchecked(&pci_parity_count);
33976 }
33977
33978 if (status & (PCI_STATUS_DETECTED_PARITY)) {
33979 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33980 "Detected Parity Error on %s\n",
33981 pci_name(dev));
33982
33983 - atomic_inc(&pci_parity_count);
33984 + atomic_inc_unchecked(&pci_parity_count);
33985 }
33986 }
33987 }
33988 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
33989 if (!check_pci_errors)
33990 return;
33991
33992 - before_count = atomic_read(&pci_parity_count);
33993 + before_count = atomic_read_unchecked(&pci_parity_count);
33994
33995 /* scan all PCI devices looking for a Parity Error on devices and
33996 * bridges.
33997 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
33998 /* Only if operator has selected panic on PCI Error */
33999 if (edac_pci_get_panic_on_pe()) {
34000 /* If the count is different 'after' from 'before' */
34001 - if (before_count != atomic_read(&pci_parity_count))
34002 + if (before_count != atomic_read_unchecked(&pci_parity_count))
34003 panic("EDAC: PCI Parity Error");
34004 }
34005 }
34006 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
34007 index 6c9a0f2..9c1cf7e 100644
34008 --- a/drivers/edac/i3000_edac.c
34009 +++ b/drivers/edac/i3000_edac.c
34010 @@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
34011 edac_mc_free(mci);
34012 }
34013
34014 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
34015 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
34016 {
34017 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34018 I3000},
34019 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
34020 index fde4db9..fe108f9 100644
34021 --- a/drivers/edac/i3200_edac.c
34022 +++ b/drivers/edac/i3200_edac.c
34023 @@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
34024 edac_mc_free(mci);
34025 }
34026
34027 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
34028 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
34029 {
34030 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34031 I3200},
34032 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
34033 index adc10a2..57d4ccf 100644
34034 --- a/drivers/edac/i5000_edac.c
34035 +++ b/drivers/edac/i5000_edac.c
34036 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
34037 *
34038 * The "E500P" device is the first device supported.
34039 */
34040 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
34041 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
34042 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
34043 .driver_data = I5000P},
34044
34045 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
34046 index 22db05a..b2b5503 100644
34047 --- a/drivers/edac/i5100_edac.c
34048 +++ b/drivers/edac/i5100_edac.c
34049 @@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
34050 edac_mc_free(mci);
34051 }
34052
34053 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
34054 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
34055 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
34056 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
34057 { 0, }
34058 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
34059 index f99d106..f050710 100644
34060 --- a/drivers/edac/i5400_edac.c
34061 +++ b/drivers/edac/i5400_edac.c
34062 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
34063 *
34064 * The "E500P" device is the first device supported.
34065 */
34066 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
34067 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
34068 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
34069 {0,} /* 0 terminated list. */
34070 };
34071 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
34072 index 577760a..9ce16ce 100644
34073 --- a/drivers/edac/i82443bxgx_edac.c
34074 +++ b/drivers/edac/i82443bxgx_edac.c
34075 @@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
34076
34077 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
34078
34079 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
34080 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
34081 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
34082 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
34083 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
34084 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
34085 index c0088ba..64a7b98 100644
34086 --- a/drivers/edac/i82860_edac.c
34087 +++ b/drivers/edac/i82860_edac.c
34088 @@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
34089 edac_mc_free(mci);
34090 }
34091
34092 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
34093 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
34094 {
34095 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34096 I82860},
34097 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
34098 index b2d83b9..a34357b 100644
34099 --- a/drivers/edac/i82875p_edac.c
34100 +++ b/drivers/edac/i82875p_edac.c
34101 @@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
34102 edac_mc_free(mci);
34103 }
34104
34105 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
34106 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
34107 {
34108 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34109 I82875P},
34110 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
34111 index 2eed3ea..87bbbd1 100644
34112 --- a/drivers/edac/i82975x_edac.c
34113 +++ b/drivers/edac/i82975x_edac.c
34114 @@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
34115 edac_mc_free(mci);
34116 }
34117
34118 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
34119 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
34120 {
34121 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34122 I82975X
34123 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
34124 index 9900675..78ac2b6 100644
34125 --- a/drivers/edac/r82600_edac.c
34126 +++ b/drivers/edac/r82600_edac.c
34127 @@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
34128 edac_mc_free(mci);
34129 }
34130
34131 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
34132 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
34133 {
34134 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
34135 },
34136 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
34137 index d4ec605..4cfec4e 100644
34138 --- a/drivers/edac/x38_edac.c
34139 +++ b/drivers/edac/x38_edac.c
34140 @@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
34141 edac_mc_free(mci);
34142 }
34143
34144 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
34145 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
34146 {
34147 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34148 X38},
34149 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
34150 index 3fc2ceb..daf098f 100644
34151 --- a/drivers/firewire/core-card.c
34152 +++ b/drivers/firewire/core-card.c
34153 @@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
34154
34155 void fw_core_remove_card(struct fw_card *card)
34156 {
34157 - struct fw_card_driver dummy_driver = dummy_driver_template;
34158 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
34159
34160 card->driver->update_phy_reg(card, 4,
34161 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
34162 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
34163 index 4560d8f..36db24a 100644
34164 --- a/drivers/firewire/core-cdev.c
34165 +++ b/drivers/firewire/core-cdev.c
34166 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
34167 int ret;
34168
34169 if ((request->channels == 0 && request->bandwidth == 0) ||
34170 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
34171 - request->bandwidth < 0)
34172 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
34173 return -EINVAL;
34174
34175 r = kmalloc(sizeof(*r), GFP_KERNEL);
34176 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
34177 index da628c7..cf54a2c 100644
34178 --- a/drivers/firewire/core-transaction.c
34179 +++ b/drivers/firewire/core-transaction.c
34180 @@ -36,6 +36,7 @@
34181 #include <linux/string.h>
34182 #include <linux/timer.h>
34183 #include <linux/types.h>
34184 +#include <linux/sched.h>
34185
34186 #include <asm/byteorder.h>
34187
34188 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
34189 struct transaction_callback_data d;
34190 struct fw_transaction t;
34191
34192 + pax_track_stack();
34193 +
34194 init_completion(&d.done);
34195 d.payload = payload;
34196 fw_send_request(card, &t, tcode, destination_id, generation, speed,
34197 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
34198 index 7ff6e75..a2965d9 100644
34199 --- a/drivers/firewire/core.h
34200 +++ b/drivers/firewire/core.h
34201 @@ -86,6 +86,7 @@ struct fw_card_driver {
34202
34203 int (*stop_iso)(struct fw_iso_context *ctx);
34204 };
34205 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
34206
34207 void fw_card_initialize(struct fw_card *card,
34208 const struct fw_card_driver *driver, struct device *device);
34209 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
34210 index 3a2ccb0..82fd7c4 100644
34211 --- a/drivers/firmware/dmi_scan.c
34212 +++ b/drivers/firmware/dmi_scan.c
34213 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
34214 }
34215 }
34216 else {
34217 - /*
34218 - * no iounmap() for that ioremap(); it would be a no-op, but
34219 - * it's so early in setup that sucker gets confused into doing
34220 - * what it shouldn't if we actually call it.
34221 - */
34222 p = dmi_ioremap(0xF0000, 0x10000);
34223 if (p == NULL)
34224 goto error;
34225 @@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
34226 if (buf == NULL)
34227 return -1;
34228
34229 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
34230 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
34231
34232 iounmap(buf);
34233 return 0;
34234 diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
34235 index 9e4f59d..110e24e 100644
34236 --- a/drivers/firmware/edd.c
34237 +++ b/drivers/firmware/edd.c
34238 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
34239 return ret;
34240 }
34241
34242 -static struct sysfs_ops edd_attr_ops = {
34243 +static const struct sysfs_ops edd_attr_ops = {
34244 .show = edd_attr_show,
34245 };
34246
34247 diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
34248 index f4f709d..082f06e 100644
34249 --- a/drivers/firmware/efivars.c
34250 +++ b/drivers/firmware/efivars.c
34251 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
34252 return ret;
34253 }
34254
34255 -static struct sysfs_ops efivar_attr_ops = {
34256 +static const struct sysfs_ops efivar_attr_ops = {
34257 .show = efivar_attr_show,
34258 .store = efivar_attr_store,
34259 };
34260 diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
34261 index 051d1eb..0a5d4e7 100644
34262 --- a/drivers/firmware/iscsi_ibft.c
34263 +++ b/drivers/firmware/iscsi_ibft.c
34264 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
34265 return ret;
34266 }
34267
34268 -static struct sysfs_ops ibft_attr_ops = {
34269 +static const struct sysfs_ops ibft_attr_ops = {
34270 .show = ibft_show_attribute,
34271 };
34272
34273 diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
34274 index 56f9234..8c58c7b 100644
34275 --- a/drivers/firmware/memmap.c
34276 +++ b/drivers/firmware/memmap.c
34277 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
34278 NULL
34279 };
34280
34281 -static struct sysfs_ops memmap_attr_ops = {
34282 +static const struct sysfs_ops memmap_attr_ops = {
34283 .show = memmap_attr_show,
34284 };
34285
34286 diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
34287 index b16c9a8..2af7d3f 100644
34288 --- a/drivers/gpio/vr41xx_giu.c
34289 +++ b/drivers/gpio/vr41xx_giu.c
34290 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
34291 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
34292 maskl, pendl, maskh, pendh);
34293
34294 - atomic_inc(&irq_err_count);
34295 + atomic_inc_unchecked(&irq_err_count);
34296
34297 return -EINVAL;
34298 }
34299 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
34300 index bea6efc..3dc0f42 100644
34301 --- a/drivers/gpu/drm/drm_crtc.c
34302 +++ b/drivers/gpu/drm/drm_crtc.c
34303 @@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
34304 */
34305 if ((out_resp->count_modes >= mode_count) && mode_count) {
34306 copied = 0;
34307 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
34308 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
34309 list_for_each_entry(mode, &connector->modes, head) {
34310 drm_crtc_convert_to_umode(&u_mode, mode);
34311 if (copy_to_user(mode_ptr + copied,
34312 @@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
34313
34314 if ((out_resp->count_props >= props_count) && props_count) {
34315 copied = 0;
34316 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
34317 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
34318 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
34319 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
34320 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
34321 if (connector->property_ids[i] != 0) {
34322 if (put_user(connector->property_ids[i],
34323 @@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
34324
34325 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
34326 copied = 0;
34327 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
34328 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
34329 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
34330 if (connector->encoder_ids[i] != 0) {
34331 if (put_user(connector->encoder_ids[i],
34332 @@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
34333 }
34334
34335 for (i = 0; i < crtc_req->count_connectors; i++) {
34336 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
34337 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
34338 if (get_user(out_id, &set_connectors_ptr[i])) {
34339 ret = -EFAULT;
34340 goto out;
34341 @@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
34342 out_resp->flags = property->flags;
34343
34344 if ((out_resp->count_values >= value_count) && value_count) {
34345 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
34346 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
34347 for (i = 0; i < value_count; i++) {
34348 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
34349 ret = -EFAULT;
34350 @@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
34351 if (property->flags & DRM_MODE_PROP_ENUM) {
34352 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
34353 copied = 0;
34354 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
34355 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
34356 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
34357
34358 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
34359 @@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
34360 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
34361 copied = 0;
34362 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
34363 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
34364 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
34365
34366 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
34367 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
34368 @@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
34369 blob = obj_to_blob(obj);
34370
34371 if (out_resp->length == blob->length) {
34372 - blob_ptr = (void *)(unsigned long)out_resp->data;
34373 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
34374 if (copy_to_user(blob_ptr, blob->data, blob->length)){
34375 ret = -EFAULT;
34376 goto done;
34377 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
34378 index 1b8745d..92fdbf6 100644
34379 --- a/drivers/gpu/drm/drm_crtc_helper.c
34380 +++ b/drivers/gpu/drm/drm_crtc_helper.c
34381 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
34382 struct drm_crtc *tmp;
34383 int crtc_mask = 1;
34384
34385 - WARN(!crtc, "checking null crtc?");
34386 + BUG_ON(!crtc);
34387
34388 dev = crtc->dev;
34389
34390 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
34391
34392 adjusted_mode = drm_mode_duplicate(dev, mode);
34393
34394 + pax_track_stack();
34395 +
34396 crtc->enabled = drm_helper_crtc_in_use(crtc);
34397
34398 if (!crtc->enabled)
34399 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
34400 index 0e27d98..dec8768 100644
34401 --- a/drivers/gpu/drm/drm_drv.c
34402 +++ b/drivers/gpu/drm/drm_drv.c
34403 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
34404 char *kdata = NULL;
34405
34406 atomic_inc(&dev->ioctl_count);
34407 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
34408 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
34409 ++file_priv->ioctl_count;
34410
34411 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
34412 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
34413 index 519161e..98c840c 100644
34414 --- a/drivers/gpu/drm/drm_fops.c
34415 +++ b/drivers/gpu/drm/drm_fops.c
34416 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
34417 }
34418
34419 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
34420 - atomic_set(&dev->counts[i], 0);
34421 + atomic_set_unchecked(&dev->counts[i], 0);
34422
34423 dev->sigdata.lock = NULL;
34424
34425 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
34426
34427 retcode = drm_open_helper(inode, filp, dev);
34428 if (!retcode) {
34429 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
34430 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
34431 spin_lock(&dev->count_lock);
34432 - if (!dev->open_count++) {
34433 + if (local_inc_return(&dev->open_count) == 1) {
34434 spin_unlock(&dev->count_lock);
34435 retcode = drm_setup(dev);
34436 goto out;
34437 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
34438
34439 lock_kernel();
34440
34441 - DRM_DEBUG("open_count = %d\n", dev->open_count);
34442 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
34443
34444 if (dev->driver->preclose)
34445 dev->driver->preclose(dev, file_priv);
34446 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
34447 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
34448 task_pid_nr(current),
34449 (long)old_encode_dev(file_priv->minor->device),
34450 - dev->open_count);
34451 + local_read(&dev->open_count));
34452
34453 /* Release any auth tokens that might point to this file_priv,
34454 (do that under the drm_global_mutex) */
34455 @@ -529,9 +529,9 @@ int drm_release(struct inode *inode, struct file *filp)
34456 * End inline drm_release
34457 */
34458
34459 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
34460 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
34461 spin_lock(&dev->count_lock);
34462 - if (!--dev->open_count) {
34463 + if (local_dec_and_test(&dev->open_count)) {
34464 if (atomic_read(&dev->ioctl_count)) {
34465 DRM_ERROR("Device busy: %d\n",
34466 atomic_read(&dev->ioctl_count));
34467 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
34468 index 8bf3770..79422805 100644
34469 --- a/drivers/gpu/drm/drm_gem.c
34470 +++ b/drivers/gpu/drm/drm_gem.c
34471 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
34472 spin_lock_init(&dev->object_name_lock);
34473 idr_init(&dev->object_name_idr);
34474 atomic_set(&dev->object_count, 0);
34475 - atomic_set(&dev->object_memory, 0);
34476 + atomic_set_unchecked(&dev->object_memory, 0);
34477 atomic_set(&dev->pin_count, 0);
34478 - atomic_set(&dev->pin_memory, 0);
34479 + atomic_set_unchecked(&dev->pin_memory, 0);
34480 atomic_set(&dev->gtt_count, 0);
34481 - atomic_set(&dev->gtt_memory, 0);
34482 + atomic_set_unchecked(&dev->gtt_memory, 0);
34483
34484 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
34485 if (!mm) {
34486 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
34487 goto fput;
34488 }
34489 atomic_inc(&dev->object_count);
34490 - atomic_add(obj->size, &dev->object_memory);
34491 + atomic_add_unchecked(obj->size, &dev->object_memory);
34492 return obj;
34493 fput:
34494 fput(obj->filp);
34495 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
34496
34497 fput(obj->filp);
34498 atomic_dec(&dev->object_count);
34499 - atomic_sub(obj->size, &dev->object_memory);
34500 + atomic_sub_unchecked(obj->size, &dev->object_memory);
34501 kfree(obj);
34502 }
34503 EXPORT_SYMBOL(drm_gem_object_free);
34504 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
34505 index f0f6c6b..34af322 100644
34506 --- a/drivers/gpu/drm/drm_info.c
34507 +++ b/drivers/gpu/drm/drm_info.c
34508 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
34509 struct drm_local_map *map;
34510 struct drm_map_list *r_list;
34511
34512 - /* Hardcoded from _DRM_FRAME_BUFFER,
34513 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
34514 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
34515 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
34516 + static const char * const types[] = {
34517 + [_DRM_FRAME_BUFFER] = "FB",
34518 + [_DRM_REGISTERS] = "REG",
34519 + [_DRM_SHM] = "SHM",
34520 + [_DRM_AGP] = "AGP",
34521 + [_DRM_SCATTER_GATHER] = "SG",
34522 + [_DRM_CONSISTENT] = "PCI",
34523 + [_DRM_GEM] = "GEM" };
34524 const char *type;
34525 int i;
34526
34527 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
34528 map = r_list->map;
34529 if (!map)
34530 continue;
34531 - if (map->type < 0 || map->type > 5)
34532 + if (map->type >= ARRAY_SIZE(types))
34533 type = "??";
34534 else
34535 type = types[map->type];
34536 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
34537 struct drm_device *dev = node->minor->dev;
34538
34539 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
34540 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
34541 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
34542 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
34543 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
34544 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
34545 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
34546 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
34547 seq_printf(m, "%d gtt total\n", dev->gtt_total);
34548 return 0;
34549 }
34550 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
34551 mutex_lock(&dev->struct_mutex);
34552 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
34553 atomic_read(&dev->vma_count),
34554 +#ifdef CONFIG_GRKERNSEC_HIDESYM
34555 + NULL, 0);
34556 +#else
34557 high_memory, (u64)virt_to_phys(high_memory));
34558 +#endif
34559
34560 list_for_each_entry(pt, &dev->vmalist, head) {
34561 vma = pt->vma;
34562 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
34563 continue;
34564 seq_printf(m,
34565 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
34566 - pt->pid, vma->vm_start, vma->vm_end,
34567 + pt->pid,
34568 +#ifdef CONFIG_GRKERNSEC_HIDESYM
34569 + 0, 0,
34570 +#else
34571 + vma->vm_start, vma->vm_end,
34572 +#endif
34573 vma->vm_flags & VM_READ ? 'r' : '-',
34574 vma->vm_flags & VM_WRITE ? 'w' : '-',
34575 vma->vm_flags & VM_EXEC ? 'x' : '-',
34576 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
34577 vma->vm_flags & VM_LOCKED ? 'l' : '-',
34578 vma->vm_flags & VM_IO ? 'i' : '-',
34579 +#ifdef CONFIG_GRKERNSEC_HIDESYM
34580 + 0);
34581 +#else
34582 vma->vm_pgoff);
34583 +#endif
34584
34585 #if defined(__i386__)
34586 pgprot = pgprot_val(vma->vm_page_prot);
34587 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
34588 index 282d9fd..71e5f11 100644
34589 --- a/drivers/gpu/drm/drm_ioc32.c
34590 +++ b/drivers/gpu/drm/drm_ioc32.c
34591 @@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
34592 request = compat_alloc_user_space(nbytes);
34593 if (!access_ok(VERIFY_WRITE, request, nbytes))
34594 return -EFAULT;
34595 - list = (struct drm_buf_desc *) (request + 1);
34596 + list = (struct drm_buf_desc __user *) (request + 1);
34597
34598 if (__put_user(count, &request->count)
34599 || __put_user(list, &request->list))
34600 @@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
34601 request = compat_alloc_user_space(nbytes);
34602 if (!access_ok(VERIFY_WRITE, request, nbytes))
34603 return -EFAULT;
34604 - list = (struct drm_buf_pub *) (request + 1);
34605 + list = (struct drm_buf_pub __user *) (request + 1);
34606
34607 if (__put_user(count, &request->count)
34608 || __put_user(list, &request->list))
34609 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
34610 index 9b9ff46..4ea724c 100644
34611 --- a/drivers/gpu/drm/drm_ioctl.c
34612 +++ b/drivers/gpu/drm/drm_ioctl.c
34613 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
34614 stats->data[i].value =
34615 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
34616 else
34617 - stats->data[i].value = atomic_read(&dev->counts[i]);
34618 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
34619 stats->data[i].type = dev->types[i];
34620 }
34621
34622 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
34623 index e2f70a5..c703e86 100644
34624 --- a/drivers/gpu/drm/drm_lock.c
34625 +++ b/drivers/gpu/drm/drm_lock.c
34626 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34627 if (drm_lock_take(&master->lock, lock->context)) {
34628 master->lock.file_priv = file_priv;
34629 master->lock.lock_time = jiffies;
34630 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
34631 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
34632 break; /* Got lock */
34633 }
34634
34635 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34636 return -EINVAL;
34637 }
34638
34639 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
34640 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
34641
34642 /* kernel_context_switch isn't used by any of the x86 drm
34643 * modules but is required by the Sparc driver.
34644 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
34645 index 7d1d88c..b9131b2 100644
34646 --- a/drivers/gpu/drm/i810/i810_dma.c
34647 +++ b/drivers/gpu/drm/i810/i810_dma.c
34648 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
34649 dma->buflist[vertex->idx],
34650 vertex->discard, vertex->used);
34651
34652 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
34653 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
34654 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
34655 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
34656 sarea_priv->last_enqueue = dev_priv->counter - 1;
34657 sarea_priv->last_dispatch = (int)hw_status[5];
34658
34659 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
34660 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
34661 mc->last_render);
34662
34663 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
34664 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
34665 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
34666 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
34667 sarea_priv->last_enqueue = dev_priv->counter - 1;
34668 sarea_priv->last_dispatch = (int)hw_status[5];
34669
34670 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
34671 index 21e2691..7321edd 100644
34672 --- a/drivers/gpu/drm/i810/i810_drv.h
34673 +++ b/drivers/gpu/drm/i810/i810_drv.h
34674 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
34675 int page_flipping;
34676
34677 wait_queue_head_t irq_queue;
34678 - atomic_t irq_received;
34679 - atomic_t irq_emitted;
34680 + atomic_unchecked_t irq_received;
34681 + atomic_unchecked_t irq_emitted;
34682
34683 int front_offset;
34684 } drm_i810_private_t;
34685 diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
34686 index da82afe..48a45de 100644
34687 --- a/drivers/gpu/drm/i830/i830_drv.h
34688 +++ b/drivers/gpu/drm/i830/i830_drv.h
34689 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
34690 int page_flipping;
34691
34692 wait_queue_head_t irq_queue;
34693 - atomic_t irq_received;
34694 - atomic_t irq_emitted;
34695 + atomic_unchecked_t irq_received;
34696 + atomic_unchecked_t irq_emitted;
34697
34698 int use_mi_batchbuffer_start;
34699
34700 diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
34701 index 91ec2bb..6f21fab 100644
34702 --- a/drivers/gpu/drm/i830/i830_irq.c
34703 +++ b/drivers/gpu/drm/i830/i830_irq.c
34704 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
34705
34706 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
34707
34708 - atomic_inc(&dev_priv->irq_received);
34709 + atomic_inc_unchecked(&dev_priv->irq_received);
34710 wake_up_interruptible(&dev_priv->irq_queue);
34711
34712 return IRQ_HANDLED;
34713 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
34714
34715 DRM_DEBUG("%s\n", __func__);
34716
34717 - atomic_inc(&dev_priv->irq_emitted);
34718 + atomic_inc_unchecked(&dev_priv->irq_emitted);
34719
34720 BEGIN_LP_RING(2);
34721 OUT_RING(0);
34722 OUT_RING(GFX_OP_USER_INTERRUPT);
34723 ADVANCE_LP_RING();
34724
34725 - return atomic_read(&dev_priv->irq_emitted);
34726 + return atomic_read_unchecked(&dev_priv->irq_emitted);
34727 }
34728
34729 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
34730 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
34731
34732 DRM_DEBUG("%s\n", __func__);
34733
34734 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
34735 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
34736 return 0;
34737
34738 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
34739 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
34740
34741 for (;;) {
34742 __set_current_state(TASK_INTERRUPTIBLE);
34743 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
34744 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
34745 break;
34746 if ((signed)(end - jiffies) <= 0) {
34747 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
34748 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
34749 I830_WRITE16(I830REG_HWSTAM, 0xffff);
34750 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
34751 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
34752 - atomic_set(&dev_priv->irq_received, 0);
34753 - atomic_set(&dev_priv->irq_emitted, 0);
34754 + atomic_set_unchecked(&dev_priv->irq_received, 0);
34755 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
34756 init_waitqueue_head(&dev_priv->irq_queue);
34757 }
34758
34759 diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
34760 index 288fc50..c6092055 100644
34761 --- a/drivers/gpu/drm/i915/dvo.h
34762 +++ b/drivers/gpu/drm/i915/dvo.h
34763 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
34764 *
34765 * \return singly-linked list of modes or NULL if no modes found.
34766 */
34767 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
34768 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
34769
34770 /**
34771 * Clean up driver-specific bits of the output
34772 */
34773 - void (*destroy) (struct intel_dvo_device *dvo);
34774 + void (* const destroy) (struct intel_dvo_device *dvo);
34775
34776 /**
34777 * Debugging hook to dump device registers to log file
34778 */
34779 - void (*dump_regs)(struct intel_dvo_device *dvo);
34780 + void (* const dump_regs)(struct intel_dvo_device *dvo);
34781 };
34782
34783 -extern struct intel_dvo_dev_ops sil164_ops;
34784 -extern struct intel_dvo_dev_ops ch7xxx_ops;
34785 -extern struct intel_dvo_dev_ops ivch_ops;
34786 -extern struct intel_dvo_dev_ops tfp410_ops;
34787 -extern struct intel_dvo_dev_ops ch7017_ops;
34788 +extern const struct intel_dvo_dev_ops sil164_ops;
34789 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
34790 +extern const struct intel_dvo_dev_ops ivch_ops;
34791 +extern const struct intel_dvo_dev_ops tfp410_ops;
34792 +extern const struct intel_dvo_dev_ops ch7017_ops;
34793
34794 #endif /* _INTEL_DVO_H */
34795 diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
34796 index 621815b..499d82e 100644
34797 --- a/drivers/gpu/drm/i915/dvo_ch7017.c
34798 +++ b/drivers/gpu/drm/i915/dvo_ch7017.c
34799 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
34800 }
34801 }
34802
34803 -struct intel_dvo_dev_ops ch7017_ops = {
34804 +const struct intel_dvo_dev_ops ch7017_ops = {
34805 .init = ch7017_init,
34806 .detect = ch7017_detect,
34807 .mode_valid = ch7017_mode_valid,
34808 diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
34809 index a9b8962..ac769ba 100644
34810 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
34811 +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
34812 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
34813 }
34814 }
34815
34816 -struct intel_dvo_dev_ops ch7xxx_ops = {
34817 +const struct intel_dvo_dev_ops ch7xxx_ops = {
34818 .init = ch7xxx_init,
34819 .detect = ch7xxx_detect,
34820 .mode_valid = ch7xxx_mode_valid,
34821 diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
34822 index aa176f9..ed2930c 100644
34823 --- a/drivers/gpu/drm/i915/dvo_ivch.c
34824 +++ b/drivers/gpu/drm/i915/dvo_ivch.c
34825 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
34826 }
34827 }
34828
34829 -struct intel_dvo_dev_ops ivch_ops= {
34830 +const struct intel_dvo_dev_ops ivch_ops= {
34831 .init = ivch_init,
34832 .dpms = ivch_dpms,
34833 .save = ivch_save,
34834 diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
34835 index e1c1f73..7dbebcf 100644
34836 --- a/drivers/gpu/drm/i915/dvo_sil164.c
34837 +++ b/drivers/gpu/drm/i915/dvo_sil164.c
34838 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
34839 }
34840 }
34841
34842 -struct intel_dvo_dev_ops sil164_ops = {
34843 +const struct intel_dvo_dev_ops sil164_ops = {
34844 .init = sil164_init,
34845 .detect = sil164_detect,
34846 .mode_valid = sil164_mode_valid,
34847 diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
34848 index 16dce84..7e1b6f8 100644
34849 --- a/drivers/gpu/drm/i915/dvo_tfp410.c
34850 +++ b/drivers/gpu/drm/i915/dvo_tfp410.c
34851 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
34852 }
34853 }
34854
34855 -struct intel_dvo_dev_ops tfp410_ops = {
34856 +const struct intel_dvo_dev_ops tfp410_ops = {
34857 .init = tfp410_init,
34858 .detect = tfp410_detect,
34859 .mode_valid = tfp410_mode_valid,
34860 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
34861 index 7e859d6..7d1cf2b 100644
34862 --- a/drivers/gpu/drm/i915/i915_debugfs.c
34863 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
34864 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
34865 I915_READ(GTIMR));
34866 }
34867 seq_printf(m, "Interrupts received: %d\n",
34868 - atomic_read(&dev_priv->irq_received));
34869 + atomic_read_unchecked(&dev_priv->irq_received));
34870 if (dev_priv->hw_status_page != NULL) {
34871 seq_printf(m, "Current sequence: %d\n",
34872 i915_get_gem_seqno(dev));
34873 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
34874 index 5449239..7e4f68d 100644
34875 --- a/drivers/gpu/drm/i915/i915_drv.c
34876 +++ b/drivers/gpu/drm/i915/i915_drv.c
34877 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
34878 return i915_resume(dev);
34879 }
34880
34881 -static struct vm_operations_struct i915_gem_vm_ops = {
34882 +static const struct vm_operations_struct i915_gem_vm_ops = {
34883 .fault = i915_gem_fault,
34884 .open = drm_gem_vm_open,
34885 .close = drm_gem_vm_close,
34886 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
34887 index 97163f7..c24c7c7 100644
34888 --- a/drivers/gpu/drm/i915/i915_drv.h
34889 +++ b/drivers/gpu/drm/i915/i915_drv.h
34890 @@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
34891 /* display clock increase/decrease */
34892 /* pll clock increase/decrease */
34893 /* clock gating init */
34894 -};
34895 +} __no_const;
34896
34897 typedef struct drm_i915_private {
34898 struct drm_device *dev;
34899 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
34900 int page_flipping;
34901
34902 wait_queue_head_t irq_queue;
34903 - atomic_t irq_received;
34904 + atomic_unchecked_t irq_received;
34905 /** Protects user_irq_refcount and irq_mask_reg */
34906 spinlock_t user_irq_lock;
34907 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
34908 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
34909 index 27a3074..eb3f959 100644
34910 --- a/drivers/gpu/drm/i915/i915_gem.c
34911 +++ b/drivers/gpu/drm/i915/i915_gem.c
34912 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
34913
34914 args->aper_size = dev->gtt_total;
34915 args->aper_available_size = (args->aper_size -
34916 - atomic_read(&dev->pin_memory));
34917 + atomic_read_unchecked(&dev->pin_memory));
34918
34919 return 0;
34920 }
34921 @@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
34922
34923 if (obj_priv->gtt_space) {
34924 atomic_dec(&dev->gtt_count);
34925 - atomic_sub(obj->size, &dev->gtt_memory);
34926 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
34927
34928 drm_mm_put_block(obj_priv->gtt_space);
34929 obj_priv->gtt_space = NULL;
34930 @@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
34931 goto search_free;
34932 }
34933 atomic_inc(&dev->gtt_count);
34934 - atomic_add(obj->size, &dev->gtt_memory);
34935 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
34936
34937 /* Assert that the object is not currently in any GPU domain. As it
34938 * wasn't in the GTT, there shouldn't be any way it could have been in
34939 @@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
34940 "%d/%d gtt bytes\n",
34941 atomic_read(&dev->object_count),
34942 atomic_read(&dev->pin_count),
34943 - atomic_read(&dev->object_memory),
34944 - atomic_read(&dev->pin_memory),
34945 - atomic_read(&dev->gtt_memory),
34946 + atomic_read_unchecked(&dev->object_memory),
34947 + atomic_read_unchecked(&dev->pin_memory),
34948 + atomic_read_unchecked(&dev->gtt_memory),
34949 dev->gtt_total);
34950 }
34951 goto err;
34952 @@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
34953 */
34954 if (obj_priv->pin_count == 1) {
34955 atomic_inc(&dev->pin_count);
34956 - atomic_add(obj->size, &dev->pin_memory);
34957 + atomic_add_unchecked(obj->size, &dev->pin_memory);
34958 if (!obj_priv->active &&
34959 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
34960 !list_empty(&obj_priv->list))
34961 @@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
34962 list_move_tail(&obj_priv->list,
34963 &dev_priv->mm.inactive_list);
34964 atomic_dec(&dev->pin_count);
34965 - atomic_sub(obj->size, &dev->pin_memory);
34966 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
34967 }
34968 i915_verify_inactive(dev, __FILE__, __LINE__);
34969 }
34970 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
34971 index 63f28ad..f5469da 100644
34972 --- a/drivers/gpu/drm/i915/i915_irq.c
34973 +++ b/drivers/gpu/drm/i915/i915_irq.c
34974 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
34975 int irq_received;
34976 int ret = IRQ_NONE;
34977
34978 - atomic_inc(&dev_priv->irq_received);
34979 + atomic_inc_unchecked(&dev_priv->irq_received);
34980
34981 if (IS_IGDNG(dev))
34982 return igdng_irq_handler(dev);
34983 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
34984 {
34985 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
34986
34987 - atomic_set(&dev_priv->irq_received, 0);
34988 + atomic_set_unchecked(&dev_priv->irq_received, 0);
34989
34990 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
34991 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
34992 diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
34993 index 5d9c6a7..d1b0e29 100644
34994 --- a/drivers/gpu/drm/i915/intel_sdvo.c
34995 +++ b/drivers/gpu/drm/i915/intel_sdvo.c
34996 @@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
34997 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
34998
34999 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
35000 - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
35001 + pax_open_kernel();
35002 + *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
35003 + pax_close_kernel();
35004
35005 /* Read the regs to test if we can talk to the device */
35006 for (i = 0; i < 0x40; i++) {
35007 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
35008 index be6c6b9..8615d9c 100644
35009 --- a/drivers/gpu/drm/mga/mga_drv.h
35010 +++ b/drivers/gpu/drm/mga/mga_drv.h
35011 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
35012 u32 clear_cmd;
35013 u32 maccess;
35014
35015 - atomic_t vbl_received; /**< Number of vblanks received. */
35016 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
35017 wait_queue_head_t fence_queue;
35018 - atomic_t last_fence_retired;
35019 + atomic_unchecked_t last_fence_retired;
35020 u32 next_fence_to_post;
35021
35022 unsigned int fb_cpp;
35023 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
35024 index daa6041..a28a5da 100644
35025 --- a/drivers/gpu/drm/mga/mga_irq.c
35026 +++ b/drivers/gpu/drm/mga/mga_irq.c
35027 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
35028 if (crtc != 0)
35029 return 0;
35030
35031 - return atomic_read(&dev_priv->vbl_received);
35032 + return atomic_read_unchecked(&dev_priv->vbl_received);
35033 }
35034
35035
35036 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35037 /* VBLANK interrupt */
35038 if (status & MGA_VLINEPEN) {
35039 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
35040 - atomic_inc(&dev_priv->vbl_received);
35041 + atomic_inc_unchecked(&dev_priv->vbl_received);
35042 drm_handle_vblank(dev, 0);
35043 handled = 1;
35044 }
35045 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35046 MGA_WRITE(MGA_PRIMEND, prim_end);
35047 }
35048
35049 - atomic_inc(&dev_priv->last_fence_retired);
35050 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
35051 DRM_WAKEUP(&dev_priv->fence_queue);
35052 handled = 1;
35053 }
35054 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
35055 * using fences.
35056 */
35057 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
35058 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
35059 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
35060 - *sequence) <= (1 << 23)));
35061
35062 *sequence = cur_fence;
35063 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
35064 index 4c39a40..b22a9ea 100644
35065 --- a/drivers/gpu/drm/r128/r128_cce.c
35066 +++ b/drivers/gpu/drm/r128/r128_cce.c
35067 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
35068
35069 /* GH: Simple idle check.
35070 */
35071 - atomic_set(&dev_priv->idle_count, 0);
35072 + atomic_set_unchecked(&dev_priv->idle_count, 0);
35073
35074 /* We don't support anything other than bus-mastering ring mode,
35075 * but the ring can be in either AGP or PCI space for the ring
35076 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
35077 index 3c60829..4faf484 100644
35078 --- a/drivers/gpu/drm/r128/r128_drv.h
35079 +++ b/drivers/gpu/drm/r128/r128_drv.h
35080 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
35081 int is_pci;
35082 unsigned long cce_buffers_offset;
35083
35084 - atomic_t idle_count;
35085 + atomic_unchecked_t idle_count;
35086
35087 int page_flipping;
35088 int current_page;
35089 u32 crtc_offset;
35090 u32 crtc_offset_cntl;
35091
35092 - atomic_t vbl_received;
35093 + atomic_unchecked_t vbl_received;
35094
35095 u32 color_fmt;
35096 unsigned int front_offset;
35097 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
35098 index 69810fb..97bf17a 100644
35099 --- a/drivers/gpu/drm/r128/r128_irq.c
35100 +++ b/drivers/gpu/drm/r128/r128_irq.c
35101 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
35102 if (crtc != 0)
35103 return 0;
35104
35105 - return atomic_read(&dev_priv->vbl_received);
35106 + return atomic_read_unchecked(&dev_priv->vbl_received);
35107 }
35108
35109 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35110 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35111 /* VBLANK interrupt */
35112 if (status & R128_CRTC_VBLANK_INT) {
35113 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
35114 - atomic_inc(&dev_priv->vbl_received);
35115 + atomic_inc_unchecked(&dev_priv->vbl_received);
35116 drm_handle_vblank(dev, 0);
35117 return IRQ_HANDLED;
35118 }
35119 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
35120 index af2665c..51922d2 100644
35121 --- a/drivers/gpu/drm/r128/r128_state.c
35122 +++ b/drivers/gpu/drm/r128/r128_state.c
35123 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
35124
35125 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
35126 {
35127 - if (atomic_read(&dev_priv->idle_count) == 0) {
35128 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
35129 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
35130 } else {
35131 - atomic_set(&dev_priv->idle_count, 0);
35132 + atomic_set_unchecked(&dev_priv->idle_count, 0);
35133 }
35134 }
35135
35136 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
35137 index dd72b91..8644b3c 100644
35138 --- a/drivers/gpu/drm/radeon/atom.c
35139 +++ b/drivers/gpu/drm/radeon/atom.c
35140 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
35141 char name[512];
35142 int i;
35143
35144 + pax_track_stack();
35145 +
35146 ctx->card = card;
35147 ctx->bios = bios;
35148
35149 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
35150 index 0d79577..efaa7a5 100644
35151 --- a/drivers/gpu/drm/radeon/mkregtable.c
35152 +++ b/drivers/gpu/drm/radeon/mkregtable.c
35153 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
35154 regex_t mask_rex;
35155 regmatch_t match[4];
35156 char buf[1024];
35157 - size_t end;
35158 + long end;
35159 int len;
35160 int done = 0;
35161 int r;
35162 unsigned o;
35163 struct offset *offset;
35164 char last_reg_s[10];
35165 - int last_reg;
35166 + unsigned long last_reg;
35167
35168 if (regcomp
35169 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
35170 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
35171 index 6735213..38c2c67 100644
35172 --- a/drivers/gpu/drm/radeon/radeon.h
35173 +++ b/drivers/gpu/drm/radeon/radeon.h
35174 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
35175 */
35176 struct radeon_fence_driver {
35177 uint32_t scratch_reg;
35178 - atomic_t seq;
35179 + atomic_unchecked_t seq;
35180 uint32_t last_seq;
35181 unsigned long count_timeout;
35182 wait_queue_head_t queue;
35183 @@ -640,7 +640,7 @@ struct radeon_asic {
35184 uint32_t offset, uint32_t obj_size);
35185 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
35186 void (*bandwidth_update)(struct radeon_device *rdev);
35187 -};
35188 +} __no_const;
35189
35190 /*
35191 * Asic structures
35192 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
35193 index 4e928b9..d8b6008 100644
35194 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
35195 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
35196 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
35197 bool linkb;
35198 struct radeon_i2c_bus_rec ddc_bus;
35199
35200 + pax_track_stack();
35201 +
35202 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
35203
35204 if (data_offset == 0)
35205 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
35206 }
35207 }
35208
35209 -struct bios_connector {
35210 +static struct bios_connector {
35211 bool valid;
35212 uint16_t line_mux;
35213 uint16_t devices;
35214 int connector_type;
35215 struct radeon_i2c_bus_rec ddc_bus;
35216 -};
35217 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
35218
35219 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
35220 drm_device
35221 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
35222 uint8_t dac;
35223 union atom_supported_devices *supported_devices;
35224 int i, j;
35225 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
35226
35227 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
35228
35229 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
35230 index 083a181..ccccae0 100644
35231 --- a/drivers/gpu/drm/radeon/radeon_display.c
35232 +++ b/drivers/gpu/drm/radeon/radeon_display.c
35233 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
35234
35235 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
35236 error = freq - current_freq;
35237 - error = error < 0 ? 0xffffffff : error;
35238 + error = (int32_t)error < 0 ? 0xffffffff : error;
35239 } else
35240 error = abs(current_freq - freq);
35241 vco_diff = abs(vco - best_vco);
35242 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
35243 index 76e4070..193fa7f 100644
35244 --- a/drivers/gpu/drm/radeon/radeon_drv.h
35245 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
35246 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
35247
35248 /* SW interrupt */
35249 wait_queue_head_t swi_queue;
35250 - atomic_t swi_emitted;
35251 + atomic_unchecked_t swi_emitted;
35252 int vblank_crtc;
35253 uint32_t irq_enable_reg;
35254 uint32_t r500_disp_irq_reg;
35255 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
35256 index 3beb26d..6ce9c4a 100644
35257 --- a/drivers/gpu/drm/radeon/radeon_fence.c
35258 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
35259 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
35260 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
35261 return 0;
35262 }
35263 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
35264 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
35265 if (!rdev->cp.ready) {
35266 /* FIXME: cp is not running assume everythings is done right
35267 * away
35268 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
35269 return r;
35270 }
35271 WREG32(rdev->fence_drv.scratch_reg, 0);
35272 - atomic_set(&rdev->fence_drv.seq, 0);
35273 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
35274 INIT_LIST_HEAD(&rdev->fence_drv.created);
35275 INIT_LIST_HEAD(&rdev->fence_drv.emited);
35276 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
35277 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
35278 index a1bf11d..4a123c0 100644
35279 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
35280 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
35281 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
35282 request = compat_alloc_user_space(sizeof(*request));
35283 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
35284 || __put_user(req32.param, &request->param)
35285 - || __put_user((void __user *)(unsigned long)req32.value,
35286 + || __put_user((unsigned long)req32.value,
35287 &request->value))
35288 return -EFAULT;
35289
35290 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
35291 index b79ecc4..8dab92d 100644
35292 --- a/drivers/gpu/drm/radeon/radeon_irq.c
35293 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
35294 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
35295 unsigned int ret;
35296 RING_LOCALS;
35297
35298 - atomic_inc(&dev_priv->swi_emitted);
35299 - ret = atomic_read(&dev_priv->swi_emitted);
35300 + atomic_inc_unchecked(&dev_priv->swi_emitted);
35301 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
35302
35303 BEGIN_RING(4);
35304 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
35305 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
35306 drm_radeon_private_t *dev_priv =
35307 (drm_radeon_private_t *) dev->dev_private;
35308
35309 - atomic_set(&dev_priv->swi_emitted, 0);
35310 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
35311 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
35312
35313 dev->max_vblank_count = 0x001fffff;
35314 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
35315 index 4747910..48ca4b3 100644
35316 --- a/drivers/gpu/drm/radeon/radeon_state.c
35317 +++ b/drivers/gpu/drm/radeon/radeon_state.c
35318 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
35319 {
35320 drm_radeon_private_t *dev_priv = dev->dev_private;
35321 drm_radeon_getparam_t *param = data;
35322 - int value;
35323 + int value = 0;
35324
35325 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
35326
35327 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
35328 index 1381e06..0e53b17 100644
35329 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
35330 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
35331 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
35332 DRM_INFO("radeon: ttm finalized\n");
35333 }
35334
35335 -static struct vm_operations_struct radeon_ttm_vm_ops;
35336 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
35337 -
35338 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35339 -{
35340 - struct ttm_buffer_object *bo;
35341 - int r;
35342 -
35343 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
35344 - if (bo == NULL) {
35345 - return VM_FAULT_NOPAGE;
35346 - }
35347 - r = ttm_vm_ops->fault(vma, vmf);
35348 - return r;
35349 -}
35350 -
35351 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
35352 {
35353 struct drm_file *file_priv;
35354 struct radeon_device *rdev;
35355 - int r;
35356
35357 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
35358 return drm_mmap(filp, vma);
35359 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
35360
35361 file_priv = (struct drm_file *)filp->private_data;
35362 rdev = file_priv->minor->dev->dev_private;
35363 - if (rdev == NULL) {
35364 + if (!rdev)
35365 return -EINVAL;
35366 - }
35367 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
35368 - if (unlikely(r != 0)) {
35369 - return r;
35370 - }
35371 - if (unlikely(ttm_vm_ops == NULL)) {
35372 - ttm_vm_ops = vma->vm_ops;
35373 - radeon_ttm_vm_ops = *ttm_vm_ops;
35374 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
35375 - }
35376 - vma->vm_ops = &radeon_ttm_vm_ops;
35377 - return 0;
35378 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
35379 }
35380
35381
35382 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
35383 index b12ff76..0bd0c6e 100644
35384 --- a/drivers/gpu/drm/radeon/rs690.c
35385 +++ b/drivers/gpu/drm/radeon/rs690.c
35386 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
35387 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
35388 rdev->pm.sideport_bandwidth.full)
35389 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
35390 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
35391 + read_delay_latency.full = rfixed_const(800 * 1000);
35392 read_delay_latency.full = rfixed_div(read_delay_latency,
35393 rdev->pm.igp_sideport_mclk);
35394 + a.full = rfixed_const(370);
35395 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
35396 } else {
35397 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
35398 rdev->pm.k8_bandwidth.full)
35399 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
35400 index 0ed436e..e6e7ce3 100644
35401 --- a/drivers/gpu/drm/ttm/ttm_bo.c
35402 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
35403 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
35404 NULL
35405 };
35406
35407 -static struct sysfs_ops ttm_bo_global_ops = {
35408 +static const struct sysfs_ops ttm_bo_global_ops = {
35409 .show = &ttm_bo_global_show
35410 };
35411
35412 diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
35413 index 1c040d0..f9e4af8 100644
35414 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
35415 +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
35416 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35417 {
35418 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
35419 vma->vm_private_data;
35420 - struct ttm_bo_device *bdev = bo->bdev;
35421 + struct ttm_bo_device *bdev;
35422 unsigned long bus_base;
35423 unsigned long bus_offset;
35424 unsigned long bus_size;
35425 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35426 unsigned long address = (unsigned long)vmf->virtual_address;
35427 int retval = VM_FAULT_NOPAGE;
35428
35429 + if (!bo)
35430 + return VM_FAULT_NOPAGE;
35431 + bdev = bo->bdev;
35432 +
35433 /*
35434 * Work around locking order reversal in fault / nopfn
35435 * between mmap_sem and bo_reserve: Perform a trylock operation
35436 diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
35437 index b170071..28ae90e 100644
35438 --- a/drivers/gpu/drm/ttm/ttm_global.c
35439 +++ b/drivers/gpu/drm/ttm/ttm_global.c
35440 @@ -36,7 +36,7 @@
35441 struct ttm_global_item {
35442 struct mutex mutex;
35443 void *object;
35444 - int refcount;
35445 + atomic_t refcount;
35446 };
35447
35448 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
35449 @@ -49,7 +49,7 @@ void ttm_global_init(void)
35450 struct ttm_global_item *item = &glob[i];
35451 mutex_init(&item->mutex);
35452 item->object = NULL;
35453 - item->refcount = 0;
35454 + atomic_set(&item->refcount, 0);
35455 }
35456 }
35457
35458 @@ -59,7 +59,7 @@ void ttm_global_release(void)
35459 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
35460 struct ttm_global_item *item = &glob[i];
35461 BUG_ON(item->object != NULL);
35462 - BUG_ON(item->refcount != 0);
35463 + BUG_ON(atomic_read(&item->refcount) != 0);
35464 }
35465 }
35466
35467 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
35468 void *object;
35469
35470 mutex_lock(&item->mutex);
35471 - if (item->refcount == 0) {
35472 + if (atomic_read(&item->refcount) == 0) {
35473 item->object = kzalloc(ref->size, GFP_KERNEL);
35474 if (unlikely(item->object == NULL)) {
35475 ret = -ENOMEM;
35476 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
35477 goto out_err;
35478
35479 }
35480 - ++item->refcount;
35481 + atomic_inc(&item->refcount);
35482 ref->object = item->object;
35483 object = item->object;
35484 mutex_unlock(&item->mutex);
35485 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
35486 struct ttm_global_item *item = &glob[ref->global_type];
35487
35488 mutex_lock(&item->mutex);
35489 - BUG_ON(item->refcount == 0);
35490 + BUG_ON(atomic_read(&item->refcount) == 0);
35491 BUG_ON(ref->object != item->object);
35492 - if (--item->refcount == 0) {
35493 + if (atomic_dec_and_test(&item->refcount)) {
35494 ref->release(ref);
35495 item->object = NULL;
35496 }
35497 diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
35498 index 072c281..d8ef483 100644
35499 --- a/drivers/gpu/drm/ttm/ttm_memory.c
35500 +++ b/drivers/gpu/drm/ttm/ttm_memory.c
35501 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
35502 NULL
35503 };
35504
35505 -static struct sysfs_ops ttm_mem_zone_ops = {
35506 +static const struct sysfs_ops ttm_mem_zone_ops = {
35507 .show = &ttm_mem_zone_show,
35508 .store = &ttm_mem_zone_store
35509 };
35510 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
35511 index cafcb84..b8e66cc 100644
35512 --- a/drivers/gpu/drm/via/via_drv.h
35513 +++ b/drivers/gpu/drm/via/via_drv.h
35514 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
35515 typedef uint32_t maskarray_t[5];
35516
35517 typedef struct drm_via_irq {
35518 - atomic_t irq_received;
35519 + atomic_unchecked_t irq_received;
35520 uint32_t pending_mask;
35521 uint32_t enable_mask;
35522 wait_queue_head_t irq_queue;
35523 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
35524 struct timeval last_vblank;
35525 int last_vblank_valid;
35526 unsigned usec_per_vblank;
35527 - atomic_t vbl_received;
35528 + atomic_unchecked_t vbl_received;
35529 drm_via_state_t hc_state;
35530 char pci_buf[VIA_PCI_BUF_SIZE];
35531 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
35532 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
35533 index 5935b88..127a8a6 100644
35534 --- a/drivers/gpu/drm/via/via_irq.c
35535 +++ b/drivers/gpu/drm/via/via_irq.c
35536 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
35537 if (crtc != 0)
35538 return 0;
35539
35540 - return atomic_read(&dev_priv->vbl_received);
35541 + return atomic_read_unchecked(&dev_priv->vbl_received);
35542 }
35543
35544 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35545 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35546
35547 status = VIA_READ(VIA_REG_INTERRUPT);
35548 if (status & VIA_IRQ_VBLANK_PENDING) {
35549 - atomic_inc(&dev_priv->vbl_received);
35550 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
35551 + atomic_inc_unchecked(&dev_priv->vbl_received);
35552 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
35553 do_gettimeofday(&cur_vblank);
35554 if (dev_priv->last_vblank_valid) {
35555 dev_priv->usec_per_vblank =
35556 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35557 dev_priv->last_vblank = cur_vblank;
35558 dev_priv->last_vblank_valid = 1;
35559 }
35560 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
35561 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
35562 DRM_DEBUG("US per vblank is: %u\n",
35563 dev_priv->usec_per_vblank);
35564 }
35565 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35566
35567 for (i = 0; i < dev_priv->num_irqs; ++i) {
35568 if (status & cur_irq->pending_mask) {
35569 - atomic_inc(&cur_irq->irq_received);
35570 + atomic_inc_unchecked(&cur_irq->irq_received);
35571 DRM_WAKEUP(&cur_irq->irq_queue);
35572 handled = 1;
35573 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
35574 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
35575 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35576 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
35577 masks[irq][4]));
35578 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
35579 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
35580 } else {
35581 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35582 (((cur_irq_sequence =
35583 - atomic_read(&cur_irq->irq_received)) -
35584 + atomic_read_unchecked(&cur_irq->irq_received)) -
35585 *sequence) <= (1 << 23)));
35586 }
35587 *sequence = cur_irq_sequence;
35588 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
35589 }
35590
35591 for (i = 0; i < dev_priv->num_irqs; ++i) {
35592 - atomic_set(&cur_irq->irq_received, 0);
35593 + atomic_set_unchecked(&cur_irq->irq_received, 0);
35594 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
35595 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
35596 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
35597 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
35598 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
35599 case VIA_IRQ_RELATIVE:
35600 irqwait->request.sequence +=
35601 - atomic_read(&cur_irq->irq_received);
35602 + atomic_read_unchecked(&cur_irq->irq_received);
35603 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
35604 case VIA_IRQ_ABSOLUTE:
35605 break;
35606 diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
35607 index aa8688d..6a0140c 100644
35608 --- a/drivers/gpu/vga/vgaarb.c
35609 +++ b/drivers/gpu/vga/vgaarb.c
35610 @@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
35611 uc = &priv->cards[i];
35612 }
35613
35614 - if (!uc)
35615 - return -EINVAL;
35616 + if (!uc) {
35617 + ret_val = -EINVAL;
35618 + goto done;
35619 + }
35620
35621 - if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
35622 - return -EINVAL;
35623 + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
35624 + ret_val = -EINVAL;
35625 + goto done;
35626 + }
35627
35628 - if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
35629 - return -EINVAL;
35630 + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
35631 + ret_val = -EINVAL;
35632 + goto done;
35633 + }
35634
35635 vga_put(pdev, io_state);
35636
35637 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
35638 index 11f8069..4783396 100644
35639 --- a/drivers/hid/hid-core.c
35640 +++ b/drivers/hid/hid-core.c
35641 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
35642
35643 int hid_add_device(struct hid_device *hdev)
35644 {
35645 - static atomic_t id = ATOMIC_INIT(0);
35646 + static atomic_unchecked_t id = ATOMIC_INIT(0);
35647 int ret;
35648
35649 if (WARN_ON(hdev->status & HID_STAT_ADDED))
35650 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
35651 /* XXX hack, any other cleaner solution after the driver core
35652 * is converted to allow more than 20 bytes as the device name? */
35653 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
35654 - hdev->vendor, hdev->product, atomic_inc_return(&id));
35655 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
35656
35657 ret = device_add(&hdev->dev);
35658 if (!ret)
35659 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
35660 index 8b6ee24..70f657d 100644
35661 --- a/drivers/hid/usbhid/hiddev.c
35662 +++ b/drivers/hid/usbhid/hiddev.c
35663 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
35664 return put_user(HID_VERSION, (int __user *)arg);
35665
35666 case HIDIOCAPPLICATION:
35667 - if (arg < 0 || arg >= hid->maxapplication)
35668 + if (arg >= hid->maxapplication)
35669 return -EINVAL;
35670
35671 for (i = 0; i < hid->maxcollection; i++)
35672 diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
35673 index 5d5ed69..f40533e 100644
35674 --- a/drivers/hwmon/lis3lv02d.c
35675 +++ b/drivers/hwmon/lis3lv02d.c
35676 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
35677 * the lid is closed. This leads to interrupts as soon as a little move
35678 * is done.
35679 */
35680 - atomic_inc(&lis3_dev.count);
35681 + atomic_inc_unchecked(&lis3_dev.count);
35682
35683 wake_up_interruptible(&lis3_dev.misc_wait);
35684 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
35685 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
35686 if (test_and_set_bit(0, &lis3_dev.misc_opened))
35687 return -EBUSY; /* already open */
35688
35689 - atomic_set(&lis3_dev.count, 0);
35690 + atomic_set_unchecked(&lis3_dev.count, 0);
35691
35692 /*
35693 * The sensor can generate interrupts for free-fall and direction
35694 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
35695 add_wait_queue(&lis3_dev.misc_wait, &wait);
35696 while (true) {
35697 set_current_state(TASK_INTERRUPTIBLE);
35698 - data = atomic_xchg(&lis3_dev.count, 0);
35699 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
35700 if (data)
35701 break;
35702
35703 @@ -244,7 +244,7 @@ out:
35704 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
35705 {
35706 poll_wait(file, &lis3_dev.misc_wait, wait);
35707 - if (atomic_read(&lis3_dev.count))
35708 + if (atomic_read_unchecked(&lis3_dev.count))
35709 return POLLIN | POLLRDNORM;
35710 return 0;
35711 }
35712 diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
35713 index 7cdd76f..fe0efdf 100644
35714 --- a/drivers/hwmon/lis3lv02d.h
35715 +++ b/drivers/hwmon/lis3lv02d.h
35716 @@ -201,7 +201,7 @@ struct lis3lv02d {
35717
35718 struct input_polled_dev *idev; /* input device */
35719 struct platform_device *pdev; /* platform device */
35720 - atomic_t count; /* interrupt count after last read */
35721 + atomic_unchecked_t count; /* interrupt count after last read */
35722 int xcalib; /* calibrated null value for x */
35723 int ycalib; /* calibrated null value for y */
35724 int zcalib; /* calibrated null value for z */
35725 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
35726 index 740785e..5a5c6c6 100644
35727 --- a/drivers/hwmon/sht15.c
35728 +++ b/drivers/hwmon/sht15.c
35729 @@ -112,7 +112,7 @@ struct sht15_data {
35730 int supply_uV;
35731 int supply_uV_valid;
35732 struct work_struct update_supply_work;
35733 - atomic_t interrupt_handled;
35734 + atomic_unchecked_t interrupt_handled;
35735 };
35736
35737 /**
35738 @@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
35739 return ret;
35740
35741 gpio_direction_input(data->pdata->gpio_data);
35742 - atomic_set(&data->interrupt_handled, 0);
35743 + atomic_set_unchecked(&data->interrupt_handled, 0);
35744
35745 enable_irq(gpio_to_irq(data->pdata->gpio_data));
35746 if (gpio_get_value(data->pdata->gpio_data) == 0) {
35747 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
35748 /* Only relevant if the interrupt hasn't occured. */
35749 - if (!atomic_read(&data->interrupt_handled))
35750 + if (!atomic_read_unchecked(&data->interrupt_handled))
35751 schedule_work(&data->read_work);
35752 }
35753 ret = wait_event_timeout(data->wait_queue,
35754 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
35755 struct sht15_data *data = d;
35756 /* First disable the interrupt */
35757 disable_irq_nosync(irq);
35758 - atomic_inc(&data->interrupt_handled);
35759 + atomic_inc_unchecked(&data->interrupt_handled);
35760 /* Then schedule a reading work struct */
35761 if (data->flag != SHT15_READING_NOTHING)
35762 schedule_work(&data->read_work);
35763 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
35764 here as could have gone low in meantime so verify
35765 it hasn't!
35766 */
35767 - atomic_set(&data->interrupt_handled, 0);
35768 + atomic_set_unchecked(&data->interrupt_handled, 0);
35769 enable_irq(gpio_to_irq(data->pdata->gpio_data));
35770 /* If still not occured or another handler has been scheduled */
35771 if (gpio_get_value(data->pdata->gpio_data)
35772 - || atomic_read(&data->interrupt_handled))
35773 + || atomic_read_unchecked(&data->interrupt_handled))
35774 return;
35775 }
35776 /* Read the data back from the device */
35777 diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
35778 index 97851c5..cb40626 100644
35779 --- a/drivers/hwmon/w83791d.c
35780 +++ b/drivers/hwmon/w83791d.c
35781 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
35782 struct i2c_board_info *info);
35783 static int w83791d_remove(struct i2c_client *client);
35784
35785 -static int w83791d_read(struct i2c_client *client, u8 register);
35786 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
35787 +static int w83791d_read(struct i2c_client *client, u8 reg);
35788 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
35789 static struct w83791d_data *w83791d_update_device(struct device *dev);
35790
35791 #ifdef DEBUG
35792 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
35793 index 378fcb5..5e91fa8 100644
35794 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
35795 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
35796 @@ -43,7 +43,7 @@
35797 extern struct i2c_adapter amd756_smbus;
35798
35799 static struct i2c_adapter *s4882_adapter;
35800 -static struct i2c_algorithm *s4882_algo;
35801 +static i2c_algorithm_no_const *s4882_algo;
35802
35803 /* Wrapper access functions for multiplexed SMBus */
35804 static DEFINE_MUTEX(amd756_lock);
35805 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
35806 index 29015eb..af2d8e9 100644
35807 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
35808 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
35809 @@ -41,7 +41,7 @@
35810 extern struct i2c_adapter *nforce2_smbus;
35811
35812 static struct i2c_adapter *s4985_adapter;
35813 -static struct i2c_algorithm *s4985_algo;
35814 +static i2c_algorithm_no_const *s4985_algo;
35815
35816 /* Wrapper access functions for multiplexed SMBus */
35817 static DEFINE_MUTEX(nforce2_lock);
35818 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
35819 index 878f8ec..12376fc 100644
35820 --- a/drivers/ide/aec62xx.c
35821 +++ b/drivers/ide/aec62xx.c
35822 @@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
35823 .cable_detect = atp86x_cable_detect,
35824 };
35825
35826 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
35827 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
35828 { /* 0: AEC6210 */
35829 .name = DRV_NAME,
35830 .init_chipset = init_chipset_aec62xx,
35831 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
35832 index e59b6de..4b4fc65 100644
35833 --- a/drivers/ide/alim15x3.c
35834 +++ b/drivers/ide/alim15x3.c
35835 @@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
35836 .dma_sff_read_status = ide_dma_sff_read_status,
35837 };
35838
35839 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
35840 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
35841 .name = DRV_NAME,
35842 .init_chipset = init_chipset_ali15x3,
35843 .init_hwif = init_hwif_ali15x3,
35844 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
35845 index 628cd2e..087a414 100644
35846 --- a/drivers/ide/amd74xx.c
35847 +++ b/drivers/ide/amd74xx.c
35848 @@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
35849 .udma_mask = udma, \
35850 }
35851
35852 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
35853 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
35854 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
35855 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
35856 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
35857 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
35858 index 837322b..837fd71 100644
35859 --- a/drivers/ide/atiixp.c
35860 +++ b/drivers/ide/atiixp.c
35861 @@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
35862 .cable_detect = atiixp_cable_detect,
35863 };
35864
35865 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
35866 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
35867 { /* 0: IXP200/300/400/700 */
35868 .name = DRV_NAME,
35869 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
35870 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
35871 index ca0c46f..d55318a 100644
35872 --- a/drivers/ide/cmd64x.c
35873 +++ b/drivers/ide/cmd64x.c
35874 @@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
35875 .dma_sff_read_status = ide_dma_sff_read_status,
35876 };
35877
35878 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
35879 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
35880 { /* 0: CMD643 */
35881 .name = DRV_NAME,
35882 .init_chipset = init_chipset_cmd64x,
35883 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
35884 index 09f98ed..cebc5bc 100644
35885 --- a/drivers/ide/cs5520.c
35886 +++ b/drivers/ide/cs5520.c
35887 @@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
35888 .set_dma_mode = cs5520_set_dma_mode,
35889 };
35890
35891 -static const struct ide_port_info cyrix_chipset __devinitdata = {
35892 +static const struct ide_port_info cyrix_chipset __devinitconst = {
35893 .name = DRV_NAME,
35894 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
35895 .port_ops = &cs5520_port_ops,
35896 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
35897 index 40bf05e..7d58ca0 100644
35898 --- a/drivers/ide/cs5530.c
35899 +++ b/drivers/ide/cs5530.c
35900 @@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
35901 .udma_filter = cs5530_udma_filter,
35902 };
35903
35904 -static const struct ide_port_info cs5530_chipset __devinitdata = {
35905 +static const struct ide_port_info cs5530_chipset __devinitconst = {
35906 .name = DRV_NAME,
35907 .init_chipset = init_chipset_cs5530,
35908 .init_hwif = init_hwif_cs5530,
35909 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
35910 index 983d957..53e6172 100644
35911 --- a/drivers/ide/cs5535.c
35912 +++ b/drivers/ide/cs5535.c
35913 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
35914 .cable_detect = cs5535_cable_detect,
35915 };
35916
35917 -static const struct ide_port_info cs5535_chipset __devinitdata = {
35918 +static const struct ide_port_info cs5535_chipset __devinitconst = {
35919 .name = DRV_NAME,
35920 .port_ops = &cs5535_port_ops,
35921 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
35922 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
35923 index 74fc540..8e933d8 100644
35924 --- a/drivers/ide/cy82c693.c
35925 +++ b/drivers/ide/cy82c693.c
35926 @@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
35927 .set_dma_mode = cy82c693_set_dma_mode,
35928 };
35929
35930 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
35931 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
35932 .name = DRV_NAME,
35933 .init_iops = init_iops_cy82c693,
35934 .port_ops = &cy82c693_port_ops,
35935 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
35936 index 7ce68ef..e78197d 100644
35937 --- a/drivers/ide/hpt366.c
35938 +++ b/drivers/ide/hpt366.c
35939 @@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
35940 }
35941 };
35942
35943 -static const struct hpt_info hpt36x __devinitdata = {
35944 +static const struct hpt_info hpt36x __devinitconst = {
35945 .chip_name = "HPT36x",
35946 .chip_type = HPT36x,
35947 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
35948 @@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
35949 .timings = &hpt36x_timings
35950 };
35951
35952 -static const struct hpt_info hpt370 __devinitdata = {
35953 +static const struct hpt_info hpt370 __devinitconst = {
35954 .chip_name = "HPT370",
35955 .chip_type = HPT370,
35956 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
35957 @@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
35958 .timings = &hpt37x_timings
35959 };
35960
35961 -static const struct hpt_info hpt370a __devinitdata = {
35962 +static const struct hpt_info hpt370a __devinitconst = {
35963 .chip_name = "HPT370A",
35964 .chip_type = HPT370A,
35965 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
35966 @@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
35967 .timings = &hpt37x_timings
35968 };
35969
35970 -static const struct hpt_info hpt374 __devinitdata = {
35971 +static const struct hpt_info hpt374 __devinitconst = {
35972 .chip_name = "HPT374",
35973 .chip_type = HPT374,
35974 .udma_mask = ATA_UDMA5,
35975 @@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
35976 .timings = &hpt37x_timings
35977 };
35978
35979 -static const struct hpt_info hpt372 __devinitdata = {
35980 +static const struct hpt_info hpt372 __devinitconst = {
35981 .chip_name = "HPT372",
35982 .chip_type = HPT372,
35983 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35984 @@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
35985 .timings = &hpt37x_timings
35986 };
35987
35988 -static const struct hpt_info hpt372a __devinitdata = {
35989 +static const struct hpt_info hpt372a __devinitconst = {
35990 .chip_name = "HPT372A",
35991 .chip_type = HPT372A,
35992 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35993 @@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
35994 .timings = &hpt37x_timings
35995 };
35996
35997 -static const struct hpt_info hpt302 __devinitdata = {
35998 +static const struct hpt_info hpt302 __devinitconst = {
35999 .chip_name = "HPT302",
36000 .chip_type = HPT302,
36001 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
36002 @@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
36003 .timings = &hpt37x_timings
36004 };
36005
36006 -static const struct hpt_info hpt371 __devinitdata = {
36007 +static const struct hpt_info hpt371 __devinitconst = {
36008 .chip_name = "HPT371",
36009 .chip_type = HPT371,
36010 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
36011 @@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
36012 .timings = &hpt37x_timings
36013 };
36014
36015 -static const struct hpt_info hpt372n __devinitdata = {
36016 +static const struct hpt_info hpt372n __devinitconst = {
36017 .chip_name = "HPT372N",
36018 .chip_type = HPT372N,
36019 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
36020 @@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
36021 .timings = &hpt37x_timings
36022 };
36023
36024 -static const struct hpt_info hpt302n __devinitdata = {
36025 +static const struct hpt_info hpt302n __devinitconst = {
36026 .chip_name = "HPT302N",
36027 .chip_type = HPT302N,
36028 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
36029 @@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
36030 .timings = &hpt37x_timings
36031 };
36032
36033 -static const struct hpt_info hpt371n __devinitdata = {
36034 +static const struct hpt_info hpt371n __devinitconst = {
36035 .chip_name = "HPT371N",
36036 .chip_type = HPT371N,
36037 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
36038 @@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
36039 .dma_sff_read_status = ide_dma_sff_read_status,
36040 };
36041
36042 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
36043 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
36044 { /* 0: HPT36x */
36045 .name = DRV_NAME,
36046 .init_chipset = init_chipset_hpt366,
36047 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
36048 index 2de76cc..74186a1 100644
36049 --- a/drivers/ide/ide-cd.c
36050 +++ b/drivers/ide/ide-cd.c
36051 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
36052 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
36053 if ((unsigned long)buf & alignment
36054 || blk_rq_bytes(rq) & q->dma_pad_mask
36055 - || object_is_on_stack(buf))
36056 + || object_starts_on_stack(buf))
36057 drive->dma = 0;
36058 }
36059 }
36060 diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
36061 index fefbdfc..62ff465 100644
36062 --- a/drivers/ide/ide-floppy.c
36063 +++ b/drivers/ide/ide-floppy.c
36064 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
36065 u8 pc_buf[256], header_len, desc_cnt;
36066 int i, rc = 1, blocks, length;
36067
36068 + pax_track_stack();
36069 +
36070 ide_debug_log(IDE_DBG_FUNC, "enter");
36071
36072 drive->bios_cyl = 0;
36073 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
36074 index 39d4e01..11538ce 100644
36075 --- a/drivers/ide/ide-pci-generic.c
36076 +++ b/drivers/ide/ide-pci-generic.c
36077 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
36078 .udma_mask = ATA_UDMA6, \
36079 }
36080
36081 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
36082 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
36083 /* 0: Unknown */
36084 DECLARE_GENERIC_PCI_DEV(0),
36085
36086 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
36087 index 0d266a5..aaca790 100644
36088 --- a/drivers/ide/it8172.c
36089 +++ b/drivers/ide/it8172.c
36090 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
36091 .set_dma_mode = it8172_set_dma_mode,
36092 };
36093
36094 -static const struct ide_port_info it8172_port_info __devinitdata = {
36095 +static const struct ide_port_info it8172_port_info __devinitconst = {
36096 .name = DRV_NAME,
36097 .port_ops = &it8172_port_ops,
36098 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
36099 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
36100 index 4797616..4be488a 100644
36101 --- a/drivers/ide/it8213.c
36102 +++ b/drivers/ide/it8213.c
36103 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
36104 .cable_detect = it8213_cable_detect,
36105 };
36106
36107 -static const struct ide_port_info it8213_chipset __devinitdata = {
36108 +static const struct ide_port_info it8213_chipset __devinitconst = {
36109 .name = DRV_NAME,
36110 .enablebits = { {0x41, 0x80, 0x80} },
36111 .port_ops = &it8213_port_ops,
36112 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
36113 index 51aa745..146ee60 100644
36114 --- a/drivers/ide/it821x.c
36115 +++ b/drivers/ide/it821x.c
36116 @@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
36117 .cable_detect = it821x_cable_detect,
36118 };
36119
36120 -static const struct ide_port_info it821x_chipset __devinitdata = {
36121 +static const struct ide_port_info it821x_chipset __devinitconst = {
36122 .name = DRV_NAME,
36123 .init_chipset = init_chipset_it821x,
36124 .init_hwif = init_hwif_it821x,
36125 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
36126 index bf2be64..9270098 100644
36127 --- a/drivers/ide/jmicron.c
36128 +++ b/drivers/ide/jmicron.c
36129 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
36130 .cable_detect = jmicron_cable_detect,
36131 };
36132
36133 -static const struct ide_port_info jmicron_chipset __devinitdata = {
36134 +static const struct ide_port_info jmicron_chipset __devinitconst = {
36135 .name = DRV_NAME,
36136 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
36137 .port_ops = &jmicron_port_ops,
36138 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
36139 index 95327a2..73f78d8 100644
36140 --- a/drivers/ide/ns87415.c
36141 +++ b/drivers/ide/ns87415.c
36142 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
36143 .dma_sff_read_status = superio_dma_sff_read_status,
36144 };
36145
36146 -static const struct ide_port_info ns87415_chipset __devinitdata = {
36147 +static const struct ide_port_info ns87415_chipset __devinitconst = {
36148 .name = DRV_NAME,
36149 .init_hwif = init_hwif_ns87415,
36150 .tp_ops = &ns87415_tp_ops,
36151 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
36152 index f1d70d6..e1de05b 100644
36153 --- a/drivers/ide/opti621.c
36154 +++ b/drivers/ide/opti621.c
36155 @@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
36156 .set_pio_mode = opti621_set_pio_mode,
36157 };
36158
36159 -static const struct ide_port_info opti621_chipset __devinitdata = {
36160 +static const struct ide_port_info opti621_chipset __devinitconst = {
36161 .name = DRV_NAME,
36162 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
36163 .port_ops = &opti621_port_ops,
36164 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
36165 index 65ba823..7311f4d 100644
36166 --- a/drivers/ide/pdc202xx_new.c
36167 +++ b/drivers/ide/pdc202xx_new.c
36168 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
36169 .udma_mask = udma, \
36170 }
36171
36172 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
36173 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
36174 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
36175 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
36176 };
36177 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
36178 index cb812f3..af816ef 100644
36179 --- a/drivers/ide/pdc202xx_old.c
36180 +++ b/drivers/ide/pdc202xx_old.c
36181 @@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
36182 .max_sectors = sectors, \
36183 }
36184
36185 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
36186 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
36187 { /* 0: PDC20246 */
36188 .name = DRV_NAME,
36189 .init_chipset = init_chipset_pdc202xx,
36190 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
36191 index bf14f39..15c4b98 100644
36192 --- a/drivers/ide/piix.c
36193 +++ b/drivers/ide/piix.c
36194 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
36195 .udma_mask = udma, \
36196 }
36197
36198 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
36199 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
36200 /* 0: MPIIX */
36201 { /*
36202 * MPIIX actually has only a single IDE channel mapped to
36203 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
36204 index a6414a8..c04173e 100644
36205 --- a/drivers/ide/rz1000.c
36206 +++ b/drivers/ide/rz1000.c
36207 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
36208 }
36209 }
36210
36211 -static const struct ide_port_info rz1000_chipset __devinitdata = {
36212 +static const struct ide_port_info rz1000_chipset __devinitconst = {
36213 .name = DRV_NAME,
36214 .host_flags = IDE_HFLAG_NO_DMA,
36215 };
36216 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
36217 index d467478..9203942 100644
36218 --- a/drivers/ide/sc1200.c
36219 +++ b/drivers/ide/sc1200.c
36220 @@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
36221 .dma_sff_read_status = ide_dma_sff_read_status,
36222 };
36223
36224 -static const struct ide_port_info sc1200_chipset __devinitdata = {
36225 +static const struct ide_port_info sc1200_chipset __devinitconst = {
36226 .name = DRV_NAME,
36227 .port_ops = &sc1200_port_ops,
36228 .dma_ops = &sc1200_dma_ops,
36229 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
36230 index 1104bb3..59c5194 100644
36231 --- a/drivers/ide/scc_pata.c
36232 +++ b/drivers/ide/scc_pata.c
36233 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
36234 .dma_sff_read_status = scc_dma_sff_read_status,
36235 };
36236
36237 -static const struct ide_port_info scc_chipset __devinitdata = {
36238 +static const struct ide_port_info scc_chipset __devinitconst = {
36239 .name = "sccIDE",
36240 .init_iops = init_iops_scc,
36241 .init_dma = scc_init_dma,
36242 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
36243 index b6554ef..6cc2cc3 100644
36244 --- a/drivers/ide/serverworks.c
36245 +++ b/drivers/ide/serverworks.c
36246 @@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
36247 .cable_detect = svwks_cable_detect,
36248 };
36249
36250 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
36251 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
36252 { /* 0: OSB4 */
36253 .name = DRV_NAME,
36254 .init_chipset = init_chipset_svwks,
36255 diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
36256 index ab3db61..afed580 100644
36257 --- a/drivers/ide/setup-pci.c
36258 +++ b/drivers/ide/setup-pci.c
36259 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
36260 int ret, i, n_ports = dev2 ? 4 : 2;
36261 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
36262
36263 + pax_track_stack();
36264 +
36265 for (i = 0; i < n_ports / 2; i++) {
36266 ret = ide_setup_pci_controller(pdev[i], d, !i);
36267 if (ret < 0)
36268 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
36269 index d95df52..0b03a39 100644
36270 --- a/drivers/ide/siimage.c
36271 +++ b/drivers/ide/siimage.c
36272 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
36273 .udma_mask = ATA_UDMA6, \
36274 }
36275
36276 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
36277 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
36278 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
36279 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
36280 };
36281 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
36282 index 3b88eba..ca8699d 100644
36283 --- a/drivers/ide/sis5513.c
36284 +++ b/drivers/ide/sis5513.c
36285 @@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
36286 .cable_detect = sis_cable_detect,
36287 };
36288
36289 -static const struct ide_port_info sis5513_chipset __devinitdata = {
36290 +static const struct ide_port_info sis5513_chipset __devinitconst = {
36291 .name = DRV_NAME,
36292 .init_chipset = init_chipset_sis5513,
36293 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
36294 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
36295 index d698da4..fca42a4 100644
36296 --- a/drivers/ide/sl82c105.c
36297 +++ b/drivers/ide/sl82c105.c
36298 @@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
36299 .dma_sff_read_status = ide_dma_sff_read_status,
36300 };
36301
36302 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
36303 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
36304 .name = DRV_NAME,
36305 .init_chipset = init_chipset_sl82c105,
36306 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
36307 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
36308 index 1ccfb40..83d5779 100644
36309 --- a/drivers/ide/slc90e66.c
36310 +++ b/drivers/ide/slc90e66.c
36311 @@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
36312 .cable_detect = slc90e66_cable_detect,
36313 };
36314
36315 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
36316 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
36317 .name = DRV_NAME,
36318 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
36319 .port_ops = &slc90e66_port_ops,
36320 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
36321 index 05a93d6..5f9e325 100644
36322 --- a/drivers/ide/tc86c001.c
36323 +++ b/drivers/ide/tc86c001.c
36324 @@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
36325 .dma_sff_read_status = ide_dma_sff_read_status,
36326 };
36327
36328 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
36329 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
36330 .name = DRV_NAME,
36331 .init_hwif = init_hwif_tc86c001,
36332 .port_ops = &tc86c001_port_ops,
36333 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
36334 index 8773c3b..7907d6c 100644
36335 --- a/drivers/ide/triflex.c
36336 +++ b/drivers/ide/triflex.c
36337 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
36338 .set_dma_mode = triflex_set_mode,
36339 };
36340
36341 -static const struct ide_port_info triflex_device __devinitdata = {
36342 +static const struct ide_port_info triflex_device __devinitconst = {
36343 .name = DRV_NAME,
36344 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
36345 .port_ops = &triflex_port_ops,
36346 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
36347 index 4b42ca0..e494a98 100644
36348 --- a/drivers/ide/trm290.c
36349 +++ b/drivers/ide/trm290.c
36350 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
36351 .dma_check = trm290_dma_check,
36352 };
36353
36354 -static const struct ide_port_info trm290_chipset __devinitdata = {
36355 +static const struct ide_port_info trm290_chipset __devinitconst = {
36356 .name = DRV_NAME,
36357 .init_hwif = init_hwif_trm290,
36358 .tp_ops = &trm290_tp_ops,
36359 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
36360 index 028de26..520d5d5 100644
36361 --- a/drivers/ide/via82cxxx.c
36362 +++ b/drivers/ide/via82cxxx.c
36363 @@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
36364 .cable_detect = via82cxxx_cable_detect,
36365 };
36366
36367 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
36368 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
36369 .name = DRV_NAME,
36370 .init_chipset = init_chipset_via82cxxx,
36371 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
36372 diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
36373 index 2cd00b5..14de699 100644
36374 --- a/drivers/ieee1394/dv1394.c
36375 +++ b/drivers/ieee1394/dv1394.c
36376 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
36377 based upon DIF section and sequence
36378 */
36379
36380 -static void inline
36381 +static inline void
36382 frame_put_packet (struct frame *f, struct packet *p)
36383 {
36384 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
36385 diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
36386 index e947d8f..6a966b9 100644
36387 --- a/drivers/ieee1394/hosts.c
36388 +++ b/drivers/ieee1394/hosts.c
36389 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
36390 }
36391
36392 static struct hpsb_host_driver dummy_driver = {
36393 + .name = "dummy",
36394 .transmit_packet = dummy_transmit_packet,
36395 .devctl = dummy_devctl,
36396 .isoctl = dummy_isoctl
36397 diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
36398 index ddaab6e..8d37435 100644
36399 --- a/drivers/ieee1394/init_ohci1394_dma.c
36400 +++ b/drivers/ieee1394/init_ohci1394_dma.c
36401 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
36402 for (func = 0; func < 8; func++) {
36403 u32 class = read_pci_config(num,slot,func,
36404 PCI_CLASS_REVISION);
36405 - if ((class == 0xffffffff))
36406 + if (class == 0xffffffff)
36407 continue; /* No device at this func */
36408
36409 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
36410 diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
36411 index 65c1429..5d8c11f 100644
36412 --- a/drivers/ieee1394/ohci1394.c
36413 +++ b/drivers/ieee1394/ohci1394.c
36414 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
36415 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
36416
36417 /* Module Parameters */
36418 -static int phys_dma = 1;
36419 +static int phys_dma;
36420 module_param(phys_dma, int, 0444);
36421 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
36422 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
36423
36424 static void dma_trm_tasklet(unsigned long data);
36425 static void dma_trm_reset(struct dma_trm_ctx *d);
36426 diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
36427 index f199896..78c9fc8 100644
36428 --- a/drivers/ieee1394/sbp2.c
36429 +++ b/drivers/ieee1394/sbp2.c
36430 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
36431 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
36432 MODULE_LICENSE("GPL");
36433
36434 -static int sbp2_module_init(void)
36435 +static int __init sbp2_module_init(void)
36436 {
36437 int ret;
36438
36439 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
36440 index a5dea6b..0cefe8f 100644
36441 --- a/drivers/infiniband/core/cm.c
36442 +++ b/drivers/infiniband/core/cm.c
36443 @@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
36444
36445 struct cm_counter_group {
36446 struct kobject obj;
36447 - atomic_long_t counter[CM_ATTR_COUNT];
36448 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
36449 };
36450
36451 struct cm_counter_attribute {
36452 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
36453 struct ib_mad_send_buf *msg = NULL;
36454 int ret;
36455
36456 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36457 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36458 counter[CM_REQ_COUNTER]);
36459
36460 /* Quick state check to discard duplicate REQs. */
36461 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
36462 if (!cm_id_priv)
36463 return;
36464
36465 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36466 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36467 counter[CM_REP_COUNTER]);
36468 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
36469 if (ret)
36470 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
36471 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
36472 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
36473 spin_unlock_irq(&cm_id_priv->lock);
36474 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36475 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36476 counter[CM_RTU_COUNTER]);
36477 goto out;
36478 }
36479 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
36480 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
36481 dreq_msg->local_comm_id);
36482 if (!cm_id_priv) {
36483 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36484 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36485 counter[CM_DREQ_COUNTER]);
36486 cm_issue_drep(work->port, work->mad_recv_wc);
36487 return -EINVAL;
36488 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
36489 case IB_CM_MRA_REP_RCVD:
36490 break;
36491 case IB_CM_TIMEWAIT:
36492 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36493 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36494 counter[CM_DREQ_COUNTER]);
36495 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36496 goto unlock;
36497 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
36498 cm_free_msg(msg);
36499 goto deref;
36500 case IB_CM_DREQ_RCVD:
36501 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36502 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36503 counter[CM_DREQ_COUNTER]);
36504 goto unlock;
36505 default:
36506 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
36507 ib_modify_mad(cm_id_priv->av.port->mad_agent,
36508 cm_id_priv->msg, timeout)) {
36509 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
36510 - atomic_long_inc(&work->port->
36511 + atomic_long_inc_unchecked(&work->port->
36512 counter_group[CM_RECV_DUPLICATES].
36513 counter[CM_MRA_COUNTER]);
36514 goto out;
36515 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
36516 break;
36517 case IB_CM_MRA_REQ_RCVD:
36518 case IB_CM_MRA_REP_RCVD:
36519 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36520 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36521 counter[CM_MRA_COUNTER]);
36522 /* fall through */
36523 default:
36524 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
36525 case IB_CM_LAP_IDLE:
36526 break;
36527 case IB_CM_MRA_LAP_SENT:
36528 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36529 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36530 counter[CM_LAP_COUNTER]);
36531 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36532 goto unlock;
36533 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
36534 cm_free_msg(msg);
36535 goto deref;
36536 case IB_CM_LAP_RCVD:
36537 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36538 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36539 counter[CM_LAP_COUNTER]);
36540 goto unlock;
36541 default:
36542 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
36543 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
36544 if (cur_cm_id_priv) {
36545 spin_unlock_irq(&cm.lock);
36546 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36547 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36548 counter[CM_SIDR_REQ_COUNTER]);
36549 goto out; /* Duplicate message. */
36550 }
36551 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
36552 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
36553 msg->retries = 1;
36554
36555 - atomic_long_add(1 + msg->retries,
36556 + atomic_long_add_unchecked(1 + msg->retries,
36557 &port->counter_group[CM_XMIT].counter[attr_index]);
36558 if (msg->retries)
36559 - atomic_long_add(msg->retries,
36560 + atomic_long_add_unchecked(msg->retries,
36561 &port->counter_group[CM_XMIT_RETRIES].
36562 counter[attr_index]);
36563
36564 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
36565 }
36566
36567 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
36568 - atomic_long_inc(&port->counter_group[CM_RECV].
36569 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
36570 counter[attr_id - CM_ATTR_ID_OFFSET]);
36571
36572 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
36573 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
36574 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
36575
36576 return sprintf(buf, "%ld\n",
36577 - atomic_long_read(&group->counter[cm_attr->index]));
36578 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
36579 }
36580
36581 -static struct sysfs_ops cm_counter_ops = {
36582 +static const struct sysfs_ops cm_counter_ops = {
36583 .show = cm_show_counter
36584 };
36585
36586 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
36587 index 8fd3a6f..61d8075 100644
36588 --- a/drivers/infiniband/core/cma.c
36589 +++ b/drivers/infiniband/core/cma.c
36590 @@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
36591
36592 req.private_data_len = sizeof(struct cma_hdr) +
36593 conn_param->private_data_len;
36594 + if (req.private_data_len < conn_param->private_data_len)
36595 + return -EINVAL;
36596 +
36597 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
36598 if (!req.private_data)
36599 return -ENOMEM;
36600 @@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
36601 memset(&req, 0, sizeof req);
36602 offset = cma_user_data_offset(id_priv->id.ps);
36603 req.private_data_len = offset + conn_param->private_data_len;
36604 + if (req.private_data_len < conn_param->private_data_len)
36605 + return -EINVAL;
36606 +
36607 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
36608 if (!private_data)
36609 return -ENOMEM;
36610 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
36611 index 4507043..14ad522 100644
36612 --- a/drivers/infiniband/core/fmr_pool.c
36613 +++ b/drivers/infiniband/core/fmr_pool.c
36614 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
36615
36616 struct task_struct *thread;
36617
36618 - atomic_t req_ser;
36619 - atomic_t flush_ser;
36620 + atomic_unchecked_t req_ser;
36621 + atomic_unchecked_t flush_ser;
36622
36623 wait_queue_head_t force_wait;
36624 };
36625 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36626 struct ib_fmr_pool *pool = pool_ptr;
36627
36628 do {
36629 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
36630 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
36631 ib_fmr_batch_release(pool);
36632
36633 - atomic_inc(&pool->flush_ser);
36634 + atomic_inc_unchecked(&pool->flush_ser);
36635 wake_up_interruptible(&pool->force_wait);
36636
36637 if (pool->flush_function)
36638 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36639 }
36640
36641 set_current_state(TASK_INTERRUPTIBLE);
36642 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
36643 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
36644 !kthread_should_stop())
36645 schedule();
36646 __set_current_state(TASK_RUNNING);
36647 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
36648 pool->dirty_watermark = params->dirty_watermark;
36649 pool->dirty_len = 0;
36650 spin_lock_init(&pool->pool_lock);
36651 - atomic_set(&pool->req_ser, 0);
36652 - atomic_set(&pool->flush_ser, 0);
36653 + atomic_set_unchecked(&pool->req_ser, 0);
36654 + atomic_set_unchecked(&pool->flush_ser, 0);
36655 init_waitqueue_head(&pool->force_wait);
36656
36657 pool->thread = kthread_run(ib_fmr_cleanup_thread,
36658 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
36659 }
36660 spin_unlock_irq(&pool->pool_lock);
36661
36662 - serial = atomic_inc_return(&pool->req_ser);
36663 + serial = atomic_inc_return_unchecked(&pool->req_ser);
36664 wake_up_process(pool->thread);
36665
36666 if (wait_event_interruptible(pool->force_wait,
36667 - atomic_read(&pool->flush_ser) - serial >= 0))
36668 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
36669 return -EINTR;
36670
36671 return 0;
36672 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
36673 } else {
36674 list_add_tail(&fmr->list, &pool->dirty_list);
36675 if (++pool->dirty_len >= pool->dirty_watermark) {
36676 - atomic_inc(&pool->req_ser);
36677 + atomic_inc_unchecked(&pool->req_ser);
36678 wake_up_process(pool->thread);
36679 }
36680 }
36681 diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
36682 index 158a214..1558bb7 100644
36683 --- a/drivers/infiniband/core/sysfs.c
36684 +++ b/drivers/infiniband/core/sysfs.c
36685 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
36686 return port_attr->show(p, port_attr, buf);
36687 }
36688
36689 -static struct sysfs_ops port_sysfs_ops = {
36690 +static const struct sysfs_ops port_sysfs_ops = {
36691 .show = port_attr_show
36692 };
36693
36694 diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
36695 index 5440da0..1194ecb 100644
36696 --- a/drivers/infiniband/core/uverbs_marshall.c
36697 +++ b/drivers/infiniband/core/uverbs_marshall.c
36698 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
36699 dst->grh.sgid_index = src->grh.sgid_index;
36700 dst->grh.hop_limit = src->grh.hop_limit;
36701 dst->grh.traffic_class = src->grh.traffic_class;
36702 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
36703 dst->dlid = src->dlid;
36704 dst->sl = src->sl;
36705 dst->src_path_bits = src->src_path_bits;
36706 dst->static_rate = src->static_rate;
36707 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
36708 dst->port_num = src->port_num;
36709 + dst->reserved = 0;
36710 }
36711 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
36712
36713 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
36714 struct ib_qp_attr *src)
36715 {
36716 + dst->qp_state = src->qp_state;
36717 dst->cur_qp_state = src->cur_qp_state;
36718 dst->path_mtu = src->path_mtu;
36719 dst->path_mig_state = src->path_mig_state;
36720 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
36721 dst->rnr_retry = src->rnr_retry;
36722 dst->alt_port_num = src->alt_port_num;
36723 dst->alt_timeout = src->alt_timeout;
36724 + memset(dst->reserved, 0, sizeof(dst->reserved));
36725 }
36726 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
36727
36728 diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
36729 index 100da85..e0d6609 100644
36730 --- a/drivers/infiniband/hw/ipath/ipath_fs.c
36731 +++ b/drivers/infiniband/hw/ipath/ipath_fs.c
36732 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
36733 struct infinipath_counters counters;
36734 struct ipath_devdata *dd;
36735
36736 + pax_track_stack();
36737 +
36738 dd = file->f_path.dentry->d_inode->i_private;
36739 dd->ipath_f_read_counters(dd, &counters);
36740
36741 @@ -122,6 +124,8 @@ static const struct file_operations atomic_counters_ops = {
36742 };
36743
36744 static ssize_t flash_read(struct file *file, char __user *buf,
36745 + size_t count, loff_t *ppos) __size_overflow(3);
36746 +static ssize_t flash_read(struct file *file, char __user *buf,
36747 size_t count, loff_t *ppos)
36748 {
36749 struct ipath_devdata *dd;
36750 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
36751 index cbde0cf..afaf55c 100644
36752 --- a/drivers/infiniband/hw/nes/nes.c
36753 +++ b/drivers/infiniband/hw/nes/nes.c
36754 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
36755 LIST_HEAD(nes_adapter_list);
36756 static LIST_HEAD(nes_dev_list);
36757
36758 -atomic_t qps_destroyed;
36759 +atomic_unchecked_t qps_destroyed;
36760
36761 static unsigned int ee_flsh_adapter;
36762 static unsigned int sysfs_nonidx_addr;
36763 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
36764 struct nes_adapter *nesadapter = nesdev->nesadapter;
36765 u32 qp_id;
36766
36767 - atomic_inc(&qps_destroyed);
36768 + atomic_inc_unchecked(&qps_destroyed);
36769
36770 /* Free the control structures */
36771
36772 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
36773 index bcc6abc..9c76b2f 100644
36774 --- a/drivers/infiniband/hw/nes/nes.h
36775 +++ b/drivers/infiniband/hw/nes/nes.h
36776 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
36777 extern unsigned int wqm_quanta;
36778 extern struct list_head nes_adapter_list;
36779
36780 -extern atomic_t cm_connects;
36781 -extern atomic_t cm_accepts;
36782 -extern atomic_t cm_disconnects;
36783 -extern atomic_t cm_closes;
36784 -extern atomic_t cm_connecteds;
36785 -extern atomic_t cm_connect_reqs;
36786 -extern atomic_t cm_rejects;
36787 -extern atomic_t mod_qp_timouts;
36788 -extern atomic_t qps_created;
36789 -extern atomic_t qps_destroyed;
36790 -extern atomic_t sw_qps_destroyed;
36791 +extern atomic_unchecked_t cm_connects;
36792 +extern atomic_unchecked_t cm_accepts;
36793 +extern atomic_unchecked_t cm_disconnects;
36794 +extern atomic_unchecked_t cm_closes;
36795 +extern atomic_unchecked_t cm_connecteds;
36796 +extern atomic_unchecked_t cm_connect_reqs;
36797 +extern atomic_unchecked_t cm_rejects;
36798 +extern atomic_unchecked_t mod_qp_timouts;
36799 +extern atomic_unchecked_t qps_created;
36800 +extern atomic_unchecked_t qps_destroyed;
36801 +extern atomic_unchecked_t sw_qps_destroyed;
36802 extern u32 mh_detected;
36803 extern u32 mh_pauses_sent;
36804 extern u32 cm_packets_sent;
36805 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
36806 extern u32 cm_listens_created;
36807 extern u32 cm_listens_destroyed;
36808 extern u32 cm_backlog_drops;
36809 -extern atomic_t cm_loopbacks;
36810 -extern atomic_t cm_nodes_created;
36811 -extern atomic_t cm_nodes_destroyed;
36812 -extern atomic_t cm_accel_dropped_pkts;
36813 -extern atomic_t cm_resets_recvd;
36814 +extern atomic_unchecked_t cm_loopbacks;
36815 +extern atomic_unchecked_t cm_nodes_created;
36816 +extern atomic_unchecked_t cm_nodes_destroyed;
36817 +extern atomic_unchecked_t cm_accel_dropped_pkts;
36818 +extern atomic_unchecked_t cm_resets_recvd;
36819
36820 extern u32 int_mod_timer_init;
36821 extern u32 int_mod_cq_depth_256;
36822 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
36823 index 73473db..5ed06e8 100644
36824 --- a/drivers/infiniband/hw/nes/nes_cm.c
36825 +++ b/drivers/infiniband/hw/nes/nes_cm.c
36826 @@ -69,11 +69,11 @@ u32 cm_packets_received;
36827 u32 cm_listens_created;
36828 u32 cm_listens_destroyed;
36829 u32 cm_backlog_drops;
36830 -atomic_t cm_loopbacks;
36831 -atomic_t cm_nodes_created;
36832 -atomic_t cm_nodes_destroyed;
36833 -atomic_t cm_accel_dropped_pkts;
36834 -atomic_t cm_resets_recvd;
36835 +atomic_unchecked_t cm_loopbacks;
36836 +atomic_unchecked_t cm_nodes_created;
36837 +atomic_unchecked_t cm_nodes_destroyed;
36838 +atomic_unchecked_t cm_accel_dropped_pkts;
36839 +atomic_unchecked_t cm_resets_recvd;
36840
36841 static inline int mini_cm_accelerated(struct nes_cm_core *,
36842 struct nes_cm_node *);
36843 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
36844
36845 static struct nes_cm_core *g_cm_core;
36846
36847 -atomic_t cm_connects;
36848 -atomic_t cm_accepts;
36849 -atomic_t cm_disconnects;
36850 -atomic_t cm_closes;
36851 -atomic_t cm_connecteds;
36852 -atomic_t cm_connect_reqs;
36853 -atomic_t cm_rejects;
36854 +atomic_unchecked_t cm_connects;
36855 +atomic_unchecked_t cm_accepts;
36856 +atomic_unchecked_t cm_disconnects;
36857 +atomic_unchecked_t cm_closes;
36858 +atomic_unchecked_t cm_connecteds;
36859 +atomic_unchecked_t cm_connect_reqs;
36860 +atomic_unchecked_t cm_rejects;
36861
36862
36863 /**
36864 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
36865 cm_node->rem_mac);
36866
36867 add_hte_node(cm_core, cm_node);
36868 - atomic_inc(&cm_nodes_created);
36869 + atomic_inc_unchecked(&cm_nodes_created);
36870
36871 return cm_node;
36872 }
36873 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
36874 }
36875
36876 atomic_dec(&cm_core->node_cnt);
36877 - atomic_inc(&cm_nodes_destroyed);
36878 + atomic_inc_unchecked(&cm_nodes_destroyed);
36879 nesqp = cm_node->nesqp;
36880 if (nesqp) {
36881 nesqp->cm_node = NULL;
36882 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
36883
36884 static void drop_packet(struct sk_buff *skb)
36885 {
36886 - atomic_inc(&cm_accel_dropped_pkts);
36887 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
36888 dev_kfree_skb_any(skb);
36889 }
36890
36891 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
36892
36893 int reset = 0; /* whether to send reset in case of err.. */
36894 int passive_state;
36895 - atomic_inc(&cm_resets_recvd);
36896 + atomic_inc_unchecked(&cm_resets_recvd);
36897 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
36898 " refcnt=%d\n", cm_node, cm_node->state,
36899 atomic_read(&cm_node->ref_count));
36900 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
36901 rem_ref_cm_node(cm_node->cm_core, cm_node);
36902 return NULL;
36903 }
36904 - atomic_inc(&cm_loopbacks);
36905 + atomic_inc_unchecked(&cm_loopbacks);
36906 loopbackremotenode->loopbackpartner = cm_node;
36907 loopbackremotenode->tcp_cntxt.rcv_wscale =
36908 NES_CM_DEFAULT_RCV_WND_SCALE;
36909 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
36910 add_ref_cm_node(cm_node);
36911 } else if (cm_node->state == NES_CM_STATE_TSA) {
36912 rem_ref_cm_node(cm_core, cm_node);
36913 - atomic_inc(&cm_accel_dropped_pkts);
36914 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
36915 dev_kfree_skb_any(skb);
36916 break;
36917 }
36918 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36919
36920 if ((cm_id) && (cm_id->event_handler)) {
36921 if (issue_disconn) {
36922 - atomic_inc(&cm_disconnects);
36923 + atomic_inc_unchecked(&cm_disconnects);
36924 cm_event.event = IW_CM_EVENT_DISCONNECT;
36925 cm_event.status = disconn_status;
36926 cm_event.local_addr = cm_id->local_addr;
36927 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36928 }
36929
36930 if (issue_close) {
36931 - atomic_inc(&cm_closes);
36932 + atomic_inc_unchecked(&cm_closes);
36933 nes_disconnect(nesqp, 1);
36934
36935 cm_id->provider_data = nesqp;
36936 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36937
36938 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
36939 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
36940 - atomic_inc(&cm_accepts);
36941 + atomic_inc_unchecked(&cm_accepts);
36942
36943 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
36944 atomic_read(&nesvnic->netdev->refcnt));
36945 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
36946
36947 struct nes_cm_core *cm_core;
36948
36949 - atomic_inc(&cm_rejects);
36950 + atomic_inc_unchecked(&cm_rejects);
36951 cm_node = (struct nes_cm_node *) cm_id->provider_data;
36952 loopback = cm_node->loopbackpartner;
36953 cm_core = cm_node->cm_core;
36954 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36955 ntohl(cm_id->local_addr.sin_addr.s_addr),
36956 ntohs(cm_id->local_addr.sin_port));
36957
36958 - atomic_inc(&cm_connects);
36959 + atomic_inc_unchecked(&cm_connects);
36960 nesqp->active_conn = 1;
36961
36962 /* cache the cm_id in the qp */
36963 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
36964 if (nesqp->destroyed) {
36965 return;
36966 }
36967 - atomic_inc(&cm_connecteds);
36968 + atomic_inc_unchecked(&cm_connecteds);
36969 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
36970 " local port 0x%04X. jiffies = %lu.\n",
36971 nesqp->hwqp.qp_id,
36972 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
36973
36974 ret = cm_id->event_handler(cm_id, &cm_event);
36975 cm_id->add_ref(cm_id);
36976 - atomic_inc(&cm_closes);
36977 + atomic_inc_unchecked(&cm_closes);
36978 cm_event.event = IW_CM_EVENT_CLOSE;
36979 cm_event.status = IW_CM_EVENT_STATUS_OK;
36980 cm_event.provider_data = cm_id->provider_data;
36981 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
36982 return;
36983 cm_id = cm_node->cm_id;
36984
36985 - atomic_inc(&cm_connect_reqs);
36986 + atomic_inc_unchecked(&cm_connect_reqs);
36987 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36988 cm_node, cm_id, jiffies);
36989
36990 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
36991 return;
36992 cm_id = cm_node->cm_id;
36993
36994 - atomic_inc(&cm_connect_reqs);
36995 + atomic_inc_unchecked(&cm_connect_reqs);
36996 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36997 cm_node, cm_id, jiffies);
36998
36999 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
37000 index e593af3..870694a 100644
37001 --- a/drivers/infiniband/hw/nes/nes_nic.c
37002 +++ b/drivers/infiniband/hw/nes/nes_nic.c
37003 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
37004 target_stat_values[++index] = mh_detected;
37005 target_stat_values[++index] = mh_pauses_sent;
37006 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
37007 - target_stat_values[++index] = atomic_read(&cm_connects);
37008 - target_stat_values[++index] = atomic_read(&cm_accepts);
37009 - target_stat_values[++index] = atomic_read(&cm_disconnects);
37010 - target_stat_values[++index] = atomic_read(&cm_connecteds);
37011 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
37012 - target_stat_values[++index] = atomic_read(&cm_rejects);
37013 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
37014 - target_stat_values[++index] = atomic_read(&qps_created);
37015 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
37016 - target_stat_values[++index] = atomic_read(&qps_destroyed);
37017 - target_stat_values[++index] = atomic_read(&cm_closes);
37018 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
37019 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
37020 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
37021 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
37022 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
37023 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
37024 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
37025 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
37026 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
37027 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
37028 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
37029 target_stat_values[++index] = cm_packets_sent;
37030 target_stat_values[++index] = cm_packets_bounced;
37031 target_stat_values[++index] = cm_packets_created;
37032 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
37033 target_stat_values[++index] = cm_listens_created;
37034 target_stat_values[++index] = cm_listens_destroyed;
37035 target_stat_values[++index] = cm_backlog_drops;
37036 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
37037 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
37038 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
37039 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
37040 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
37041 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
37042 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
37043 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
37044 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
37045 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
37046 target_stat_values[++index] = int_mod_timer_init;
37047 target_stat_values[++index] = int_mod_cq_depth_1;
37048 target_stat_values[++index] = int_mod_cq_depth_4;
37049 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
37050 index a680c42..f914deb 100644
37051 --- a/drivers/infiniband/hw/nes/nes_verbs.c
37052 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
37053 @@ -45,9 +45,9 @@
37054
37055 #include <rdma/ib_umem.h>
37056
37057 -atomic_t mod_qp_timouts;
37058 -atomic_t qps_created;
37059 -atomic_t sw_qps_destroyed;
37060 +atomic_unchecked_t mod_qp_timouts;
37061 +atomic_unchecked_t qps_created;
37062 +atomic_unchecked_t sw_qps_destroyed;
37063
37064 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
37065
37066 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
37067 if (init_attr->create_flags)
37068 return ERR_PTR(-EINVAL);
37069
37070 - atomic_inc(&qps_created);
37071 + atomic_inc_unchecked(&qps_created);
37072 switch (init_attr->qp_type) {
37073 case IB_QPT_RC:
37074 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
37075 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
37076 struct iw_cm_event cm_event;
37077 int ret;
37078
37079 - atomic_inc(&sw_qps_destroyed);
37080 + atomic_inc_unchecked(&sw_qps_destroyed);
37081 nesqp->destroyed = 1;
37082
37083 /* Blow away the connection if it exists. */
37084 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
37085 index ac11be0..3883c04 100644
37086 --- a/drivers/input/gameport/gameport.c
37087 +++ b/drivers/input/gameport/gameport.c
37088 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
37089 */
37090 static void gameport_init_port(struct gameport *gameport)
37091 {
37092 - static atomic_t gameport_no = ATOMIC_INIT(0);
37093 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
37094
37095 __module_get(THIS_MODULE);
37096
37097 mutex_init(&gameport->drv_mutex);
37098 device_initialize(&gameport->dev);
37099 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
37100 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
37101 gameport->dev.bus = &gameport_bus;
37102 gameport->dev.release = gameport_release_port;
37103 if (gameport->parent)
37104 diff --git a/drivers/input/input.c b/drivers/input/input.c
37105 index c82ae82..8cfb9cb 100644
37106 --- a/drivers/input/input.c
37107 +++ b/drivers/input/input.c
37108 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
37109 */
37110 int input_register_device(struct input_dev *dev)
37111 {
37112 - static atomic_t input_no = ATOMIC_INIT(0);
37113 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
37114 struct input_handler *handler;
37115 const char *path;
37116 int error;
37117 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
37118 dev->setkeycode = input_default_setkeycode;
37119
37120 dev_set_name(&dev->dev, "input%ld",
37121 - (unsigned long) atomic_inc_return(&input_no) - 1);
37122 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
37123
37124 error = device_add(&dev->dev);
37125 if (error)
37126 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
37127 index ca13a6b..b032b0c 100644
37128 --- a/drivers/input/joystick/sidewinder.c
37129 +++ b/drivers/input/joystick/sidewinder.c
37130 @@ -30,6 +30,7 @@
37131 #include <linux/kernel.h>
37132 #include <linux/module.h>
37133 #include <linux/slab.h>
37134 +#include <linux/sched.h>
37135 #include <linux/init.h>
37136 #include <linux/input.h>
37137 #include <linux/gameport.h>
37138 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
37139 unsigned char buf[SW_LENGTH];
37140 int i;
37141
37142 + pax_track_stack();
37143 +
37144 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
37145
37146 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
37147 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
37148 index 79e3edc..01412b9 100644
37149 --- a/drivers/input/joystick/xpad.c
37150 +++ b/drivers/input/joystick/xpad.c
37151 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
37152
37153 static int xpad_led_probe(struct usb_xpad *xpad)
37154 {
37155 - static atomic_t led_seq = ATOMIC_INIT(0);
37156 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
37157 long led_no;
37158 struct xpad_led *led;
37159 struct led_classdev *led_cdev;
37160 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
37161 if (!led)
37162 return -ENOMEM;
37163
37164 - led_no = (long)atomic_inc_return(&led_seq) - 1;
37165 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
37166
37167 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
37168 led->xpad = xpad;
37169 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
37170 index 0236f0d..c7327f1 100644
37171 --- a/drivers/input/serio/serio.c
37172 +++ b/drivers/input/serio/serio.c
37173 @@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
37174 */
37175 static void serio_init_port(struct serio *serio)
37176 {
37177 - static atomic_t serio_no = ATOMIC_INIT(0);
37178 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
37179
37180 __module_get(THIS_MODULE);
37181
37182 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
37183 mutex_init(&serio->drv_mutex);
37184 device_initialize(&serio->dev);
37185 dev_set_name(&serio->dev, "serio%ld",
37186 - (long)atomic_inc_return(&serio_no) - 1);
37187 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
37188 serio->dev.bus = &serio_bus;
37189 serio->dev.release = serio_release_port;
37190 if (serio->parent) {
37191 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
37192 index 33dcd8d..2783d25 100644
37193 --- a/drivers/isdn/gigaset/common.c
37194 +++ b/drivers/isdn/gigaset/common.c
37195 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
37196 cs->commands_pending = 0;
37197 cs->cur_at_seq = 0;
37198 cs->gotfwver = -1;
37199 - cs->open_count = 0;
37200 + local_set(&cs->open_count, 0);
37201 cs->dev = NULL;
37202 cs->tty = NULL;
37203 cs->tty_dev = NULL;
37204 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
37205 index a2f6125..6a70677 100644
37206 --- a/drivers/isdn/gigaset/gigaset.h
37207 +++ b/drivers/isdn/gigaset/gigaset.h
37208 @@ -34,6 +34,7 @@
37209 #include <linux/tty_driver.h>
37210 #include <linux/list.h>
37211 #include <asm/atomic.h>
37212 +#include <asm/local.h>
37213
37214 #define GIG_VERSION {0,5,0,0}
37215 #define GIG_COMPAT {0,4,0,0}
37216 @@ -446,7 +447,7 @@ struct cardstate {
37217 spinlock_t cmdlock;
37218 unsigned curlen, cmdbytes;
37219
37220 - unsigned open_count;
37221 + local_t open_count;
37222 struct tty_struct *tty;
37223 struct tasklet_struct if_wake_tasklet;
37224 unsigned control_state;
37225 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
37226 index b3065b8..c7e8cc9 100644
37227 --- a/drivers/isdn/gigaset/interface.c
37228 +++ b/drivers/isdn/gigaset/interface.c
37229 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
37230 return -ERESTARTSYS; // FIXME -EINTR?
37231 tty->driver_data = cs;
37232
37233 - ++cs->open_count;
37234 -
37235 - if (cs->open_count == 1) {
37236 + if (local_inc_return(&cs->open_count) == 1) {
37237 spin_lock_irqsave(&cs->lock, flags);
37238 cs->tty = tty;
37239 spin_unlock_irqrestore(&cs->lock, flags);
37240 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
37241
37242 if (!cs->connected)
37243 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37244 - else if (!cs->open_count)
37245 + else if (!local_read(&cs->open_count))
37246 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37247 else {
37248 - if (!--cs->open_count) {
37249 + if (!local_dec_return(&cs->open_count)) {
37250 spin_lock_irqsave(&cs->lock, flags);
37251 cs->tty = NULL;
37252 spin_unlock_irqrestore(&cs->lock, flags);
37253 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
37254 if (!cs->connected) {
37255 gig_dbg(DEBUG_IF, "not connected");
37256 retval = -ENODEV;
37257 - } else if (!cs->open_count)
37258 + } else if (!local_read(&cs->open_count))
37259 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37260 else {
37261 retval = 0;
37262 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
37263 if (!cs->connected) {
37264 gig_dbg(DEBUG_IF, "not connected");
37265 retval = -ENODEV;
37266 - } else if (!cs->open_count)
37267 + } else if (!local_read(&cs->open_count))
37268 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37269 else if (cs->mstate != MS_LOCKED) {
37270 dev_warn(cs->dev, "can't write to unlocked device\n");
37271 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
37272 if (!cs->connected) {
37273 gig_dbg(DEBUG_IF, "not connected");
37274 retval = -ENODEV;
37275 - } else if (!cs->open_count)
37276 + } else if (!local_read(&cs->open_count))
37277 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37278 else if (cs->mstate != MS_LOCKED) {
37279 dev_warn(cs->dev, "can't write to unlocked device\n");
37280 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
37281
37282 if (!cs->connected)
37283 gig_dbg(DEBUG_IF, "not connected");
37284 - else if (!cs->open_count)
37285 + else if (!local_read(&cs->open_count))
37286 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37287 else if (cs->mstate != MS_LOCKED)
37288 dev_warn(cs->dev, "can't write to unlocked device\n");
37289 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
37290
37291 if (!cs->connected)
37292 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37293 - else if (!cs->open_count)
37294 + else if (!local_read(&cs->open_count))
37295 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37296 else {
37297 //FIXME
37298 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
37299
37300 if (!cs->connected)
37301 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37302 - else if (!cs->open_count)
37303 + else if (!local_read(&cs->open_count))
37304 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37305 else {
37306 //FIXME
37307 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
37308 goto out;
37309 }
37310
37311 - if (!cs->open_count) {
37312 + if (!local_read(&cs->open_count)) {
37313 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37314 goto out;
37315 }
37316 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
37317 index a7c0083..62a7cb6 100644
37318 --- a/drivers/isdn/hardware/avm/b1.c
37319 +++ b/drivers/isdn/hardware/avm/b1.c
37320 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
37321 }
37322 if (left) {
37323 if (t4file->user) {
37324 - if (copy_from_user(buf, dp, left))
37325 + if (left > sizeof buf || copy_from_user(buf, dp, left))
37326 return -EFAULT;
37327 } else {
37328 memcpy(buf, dp, left);
37329 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
37330 }
37331 if (left) {
37332 if (config->user) {
37333 - if (copy_from_user(buf, dp, left))
37334 + if (left > sizeof buf || copy_from_user(buf, dp, left))
37335 return -EFAULT;
37336 } else {
37337 memcpy(buf, dp, left);
37338 diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
37339 index f130724..c373c68 100644
37340 --- a/drivers/isdn/hardware/eicon/capidtmf.c
37341 +++ b/drivers/isdn/hardware/eicon/capidtmf.c
37342 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
37343 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
37344 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
37345
37346 + pax_track_stack();
37347
37348 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
37349 {
37350 diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
37351 index 4d425c6..a9be6c4 100644
37352 --- a/drivers/isdn/hardware/eicon/capifunc.c
37353 +++ b/drivers/isdn/hardware/eicon/capifunc.c
37354 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
37355 IDI_SYNC_REQ req;
37356 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37357
37358 + pax_track_stack();
37359 +
37360 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37361
37362 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37363 diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
37364 index 3029234..ef0d9e2 100644
37365 --- a/drivers/isdn/hardware/eicon/diddfunc.c
37366 +++ b/drivers/isdn/hardware/eicon/diddfunc.c
37367 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
37368 IDI_SYNC_REQ req;
37369 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37370
37371 + pax_track_stack();
37372 +
37373 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37374
37375 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37376 diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
37377 index d36a4c0..11e7d1a 100644
37378 --- a/drivers/isdn/hardware/eicon/divasfunc.c
37379 +++ b/drivers/isdn/hardware/eicon/divasfunc.c
37380 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
37381 IDI_SYNC_REQ req;
37382 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37383
37384 + pax_track_stack();
37385 +
37386 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37387
37388 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37389 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
37390 index 85784a7..a19ca98 100644
37391 --- a/drivers/isdn/hardware/eicon/divasync.h
37392 +++ b/drivers/isdn/hardware/eicon/divasync.h
37393 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
37394 } diva_didd_add_adapter_t;
37395 typedef struct _diva_didd_remove_adapter {
37396 IDI_CALL p_request;
37397 -} diva_didd_remove_adapter_t;
37398 +} __no_const diva_didd_remove_adapter_t;
37399 typedef struct _diva_didd_read_adapter_array {
37400 void * buffer;
37401 dword length;
37402 diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
37403 index db87d51..7d09acf 100644
37404 --- a/drivers/isdn/hardware/eicon/idifunc.c
37405 +++ b/drivers/isdn/hardware/eicon/idifunc.c
37406 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
37407 IDI_SYNC_REQ req;
37408 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37409
37410 + pax_track_stack();
37411 +
37412 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37413
37414 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37415 diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
37416 index ae89fb8..0fab299 100644
37417 --- a/drivers/isdn/hardware/eicon/message.c
37418 +++ b/drivers/isdn/hardware/eicon/message.c
37419 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
37420 dword d;
37421 word w;
37422
37423 + pax_track_stack();
37424 +
37425 a = plci->adapter;
37426 Id = ((word)plci->Id<<8)|a->Id;
37427 PUT_WORD(&SS_Ind[4],0x0000);
37428 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
37429 word j, n, w;
37430 dword d;
37431
37432 + pax_track_stack();
37433 +
37434
37435 for(i=0;i<8;i++) bp_parms[i].length = 0;
37436 for(i=0;i<2;i++) global_config[i].length = 0;
37437 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
37438 const byte llc3[] = {4,3,2,2,6,6,0};
37439 const byte header[] = {0,2,3,3,0,0,0};
37440
37441 + pax_track_stack();
37442 +
37443 for(i=0;i<8;i++) bp_parms[i].length = 0;
37444 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
37445 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
37446 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
37447 word appl_number_group_type[MAX_APPL];
37448 PLCI *auxplci;
37449
37450 + pax_track_stack();
37451 +
37452 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
37453
37454 if(!a->group_optimization_enabled)
37455 diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
37456 index a564b75..f3cf8b5 100644
37457 --- a/drivers/isdn/hardware/eicon/mntfunc.c
37458 +++ b/drivers/isdn/hardware/eicon/mntfunc.c
37459 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
37460 IDI_SYNC_REQ req;
37461 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37462
37463 + pax_track_stack();
37464 +
37465 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37466
37467 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37468 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
37469 index a3bd163..8956575 100644
37470 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
37471 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
37472 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
37473 typedef struct _diva_os_idi_adapter_interface {
37474 diva_init_card_proc_t cleanup_adapter_proc;
37475 diva_cmd_card_proc_t cmd_proc;
37476 -} diva_os_idi_adapter_interface_t;
37477 +} __no_const diva_os_idi_adapter_interface_t;
37478
37479 typedef struct _diva_os_xdi_adapter {
37480 struct list_head link;
37481 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
37482 index adb1e8c..21b590b 100644
37483 --- a/drivers/isdn/i4l/isdn_common.c
37484 +++ b/drivers/isdn/i4l/isdn_common.c
37485 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
37486 } iocpar;
37487 void __user *argp = (void __user *)arg;
37488
37489 + pax_track_stack();
37490 +
37491 #define name iocpar.name
37492 #define bname iocpar.bname
37493 #define iocts iocpar.iocts
37494 diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
37495 index 90b56ed..5ed3305 100644
37496 --- a/drivers/isdn/i4l/isdn_net.c
37497 +++ b/drivers/isdn/i4l/isdn_net.c
37498 @@ -1902,7 +1902,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
37499 {
37500 isdn_net_local *lp = netdev_priv(dev);
37501 unsigned char *p;
37502 - ushort len = 0;
37503 + int len = 0;
37504
37505 switch (lp->p_encap) {
37506 case ISDN_NET_ENCAP_ETHER:
37507 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
37508 index bf7997a..cf091db 100644
37509 --- a/drivers/isdn/icn/icn.c
37510 +++ b/drivers/isdn/icn/icn.c
37511 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
37512 if (count > len)
37513 count = len;
37514 if (user) {
37515 - if (copy_from_user(msg, buf, count))
37516 + if (count > sizeof msg || copy_from_user(msg, buf, count))
37517 return -EFAULT;
37518 } else
37519 memcpy(msg, buf, count);
37520 diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
37521 index feb0fa4..f76f830 100644
37522 --- a/drivers/isdn/mISDN/socket.c
37523 +++ b/drivers/isdn/mISDN/socket.c
37524 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
37525 if (dev) {
37526 struct mISDN_devinfo di;
37527
37528 + memset(&di, 0, sizeof(di));
37529 di.id = dev->id;
37530 di.Dprotocols = dev->Dprotocols;
37531 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
37532 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
37533 if (dev) {
37534 struct mISDN_devinfo di;
37535
37536 + memset(&di, 0, sizeof(di));
37537 di.id = dev->id;
37538 di.Dprotocols = dev->Dprotocols;
37539 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
37540 diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
37541 index 485be8b..f0225bc 100644
37542 --- a/drivers/isdn/sc/interrupt.c
37543 +++ b/drivers/isdn/sc/interrupt.c
37544 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
37545 }
37546 else if(callid>=0x0000 && callid<=0x7FFF)
37547 {
37548 + int len;
37549 +
37550 pr_debug("%s: Got Incoming Call\n",
37551 sc_adapter[card]->devicename);
37552 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
37553 - strcpy(setup.eazmsn,
37554 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
37555 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
37556 + sizeof(setup.phone));
37557 + if (len >= sizeof(setup.phone))
37558 + continue;
37559 + len = strlcpy(setup.eazmsn,
37560 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
37561 + sizeof(setup.eazmsn));
37562 + if (len >= sizeof(setup.eazmsn))
37563 + continue;
37564 setup.si1 = 7;
37565 setup.si2 = 0;
37566 setup.plan = 0;
37567 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
37568 * Handle a GetMyNumber Rsp
37569 */
37570 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
37571 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
37572 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
37573 + rcvmsg.msg_data.byte_array,
37574 + sizeof(rcvmsg.msg_data.byte_array));
37575 continue;
37576 }
37577
37578 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
37579 index 8744d24..d1f9a9a 100644
37580 --- a/drivers/lguest/core.c
37581 +++ b/drivers/lguest/core.c
37582 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
37583 * it's worked so far. The end address needs +1 because __get_vm_area
37584 * allocates an extra guard page, so we need space for that.
37585 */
37586 +
37587 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
37588 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37589 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
37590 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37591 +#else
37592 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37593 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
37594 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37595 +#endif
37596 +
37597 if (!switcher_vma) {
37598 err = -ENOMEM;
37599 printk("lguest: could not map switcher pages high\n");
37600 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
37601 * Now the Switcher is mapped at the right address, we can't fail!
37602 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
37603 */
37604 - memcpy(switcher_vma->addr, start_switcher_text,
37605 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
37606 end_switcher_text - start_switcher_text);
37607
37608 printk(KERN_INFO "lguest: mapped switcher at %p\n",
37609 diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
37610 index bd16323..ab460f7 100644
37611 --- a/drivers/lguest/lguest_user.c
37612 +++ b/drivers/lguest/lguest_user.c
37613 @@ -194,6 +194,7 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
37614 * Once our Guest is initialized, the Launcher makes it run by reading
37615 * from /dev/lguest.
37616 */
37617 +static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) __size_overflow(3);
37618 static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
37619 {
37620 struct lguest *lg = file->private_data;
37621 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
37622 index 6ae3888..8b38145 100644
37623 --- a/drivers/lguest/x86/core.c
37624 +++ b/drivers/lguest/x86/core.c
37625 @@ -59,7 +59,7 @@ static struct {
37626 /* Offset from where switcher.S was compiled to where we've copied it */
37627 static unsigned long switcher_offset(void)
37628 {
37629 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
37630 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
37631 }
37632
37633 /* This cpu's struct lguest_pages. */
37634 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
37635 * These copies are pretty cheap, so we do them unconditionally: */
37636 /* Save the current Host top-level page directory.
37637 */
37638 +
37639 +#ifdef CONFIG_PAX_PER_CPU_PGD
37640 + pages->state.host_cr3 = read_cr3();
37641 +#else
37642 pages->state.host_cr3 = __pa(current->mm->pgd);
37643 +#endif
37644 +
37645 /*
37646 * Set up the Guest's page tables to see this CPU's pages (and no
37647 * other CPU's pages).
37648 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
37649 * compiled-in switcher code and the high-mapped copy we just made.
37650 */
37651 for (i = 0; i < IDT_ENTRIES; i++)
37652 - default_idt_entries[i] += switcher_offset();
37653 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
37654
37655 /*
37656 * Set up the Switcher's per-cpu areas.
37657 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
37658 * it will be undisturbed when we switch. To change %cs and jump we
37659 * need this structure to feed to Intel's "lcall" instruction.
37660 */
37661 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
37662 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
37663 lguest_entry.segment = LGUEST_CS;
37664
37665 /*
37666 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
37667 index 40634b0..4f5855e 100644
37668 --- a/drivers/lguest/x86/switcher_32.S
37669 +++ b/drivers/lguest/x86/switcher_32.S
37670 @@ -87,6 +87,7 @@
37671 #include <asm/page.h>
37672 #include <asm/segment.h>
37673 #include <asm/lguest.h>
37674 +#include <asm/processor-flags.h>
37675
37676 // We mark the start of the code to copy
37677 // It's placed in .text tho it's never run here
37678 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
37679 // Changes type when we load it: damn Intel!
37680 // For after we switch over our page tables
37681 // That entry will be read-only: we'd crash.
37682 +
37683 +#ifdef CONFIG_PAX_KERNEXEC
37684 + mov %cr0, %edx
37685 + xor $X86_CR0_WP, %edx
37686 + mov %edx, %cr0
37687 +#endif
37688 +
37689 movl $(GDT_ENTRY_TSS*8), %edx
37690 ltr %dx
37691
37692 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
37693 // Let's clear it again for our return.
37694 // The GDT descriptor of the Host
37695 // Points to the table after two "size" bytes
37696 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
37697 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
37698 // Clear "used" from type field (byte 5, bit 2)
37699 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
37700 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
37701 +
37702 +#ifdef CONFIG_PAX_KERNEXEC
37703 + mov %cr0, %eax
37704 + xor $X86_CR0_WP, %eax
37705 + mov %eax, %cr0
37706 +#endif
37707
37708 // Once our page table's switched, the Guest is live!
37709 // The Host fades as we run this final step.
37710 @@ -295,13 +309,12 @@ deliver_to_host:
37711 // I consulted gcc, and it gave
37712 // These instructions, which I gladly credit:
37713 leal (%edx,%ebx,8), %eax
37714 - movzwl (%eax),%edx
37715 - movl 4(%eax), %eax
37716 - xorw %ax, %ax
37717 - orl %eax, %edx
37718 + movl 4(%eax), %edx
37719 + movw (%eax), %dx
37720 // Now the address of the handler's in %edx
37721 // We call it now: its "iret" drops us home.
37722 - jmp *%edx
37723 + ljmp $__KERNEL_CS, $1f
37724 +1: jmp *%edx
37725
37726 // Every interrupt can come to us here
37727 // But we must truly tell each apart.
37728 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
37729 index 588a5b0..b71db89 100644
37730 --- a/drivers/macintosh/macio_asic.c
37731 +++ b/drivers/macintosh/macio_asic.c
37732 @@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
37733 * MacIO is matched against any Apple ID, it's probe() function
37734 * will then decide wether it applies or not
37735 */
37736 -static const struct pci_device_id __devinitdata pci_ids [] = { {
37737 +static const struct pci_device_id __devinitconst pci_ids [] = { {
37738 .vendor = PCI_VENDOR_ID_APPLE,
37739 .device = PCI_ANY_ID,
37740 .subvendor = PCI_ANY_ID,
37741 diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
37742 index a348bb0..ecd9b3f 100644
37743 --- a/drivers/macintosh/via-pmu-backlight.c
37744 +++ b/drivers/macintosh/via-pmu-backlight.c
37745 @@ -15,7 +15,7 @@
37746
37747 #define MAX_PMU_LEVEL 0xFF
37748
37749 -static struct backlight_ops pmu_backlight_data;
37750 +static const struct backlight_ops pmu_backlight_data;
37751 static DEFINE_SPINLOCK(pmu_backlight_lock);
37752 static int sleeping, uses_pmu_bl;
37753 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
37754 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
37755 return bd->props.brightness;
37756 }
37757
37758 -static struct backlight_ops pmu_backlight_data = {
37759 +static const struct backlight_ops pmu_backlight_data = {
37760 .get_brightness = pmu_backlight_get_brightness,
37761 .update_status = pmu_backlight_update_status,
37762
37763 diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
37764 index 6f308a4..b5f7ff7 100644
37765 --- a/drivers/macintosh/via-pmu.c
37766 +++ b/drivers/macintosh/via-pmu.c
37767 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
37768 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
37769 }
37770
37771 -static struct platform_suspend_ops pmu_pm_ops = {
37772 +static const struct platform_suspend_ops pmu_pm_ops = {
37773 .enter = powerbook_sleep,
37774 .valid = pmu_sleep_valid,
37775 };
37776 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
37777 index 818b617..4656e38 100644
37778 --- a/drivers/md/dm-ioctl.c
37779 +++ b/drivers/md/dm-ioctl.c
37780 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
37781 cmd == DM_LIST_VERSIONS_CMD)
37782 return 0;
37783
37784 - if ((cmd == DM_DEV_CREATE_CMD)) {
37785 + if (cmd == DM_DEV_CREATE_CMD) {
37786 if (!*param->name) {
37787 DMWARN("name not supplied when creating device");
37788 return -EINVAL;
37789 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
37790 index 6021d0a..a878643 100644
37791 --- a/drivers/md/dm-raid1.c
37792 +++ b/drivers/md/dm-raid1.c
37793 @@ -41,7 +41,7 @@ enum dm_raid1_error {
37794
37795 struct mirror {
37796 struct mirror_set *ms;
37797 - atomic_t error_count;
37798 + atomic_unchecked_t error_count;
37799 unsigned long error_type;
37800 struct dm_dev *dev;
37801 sector_t offset;
37802 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
37803 * simple way to tell if a device has encountered
37804 * errors.
37805 */
37806 - atomic_inc(&m->error_count);
37807 + atomic_inc_unchecked(&m->error_count);
37808
37809 if (test_and_set_bit(error_type, &m->error_type))
37810 return;
37811 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
37812 }
37813
37814 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
37815 - if (!atomic_read(&new->error_count)) {
37816 + if (!atomic_read_unchecked(&new->error_count)) {
37817 set_default_mirror(new);
37818 break;
37819 }
37820 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
37821 struct mirror *m = get_default_mirror(ms);
37822
37823 do {
37824 - if (likely(!atomic_read(&m->error_count)))
37825 + if (likely(!atomic_read_unchecked(&m->error_count)))
37826 return m;
37827
37828 if (m-- == ms->mirror)
37829 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
37830 {
37831 struct mirror *default_mirror = get_default_mirror(m->ms);
37832
37833 - return !atomic_read(&default_mirror->error_count);
37834 + return !atomic_read_unchecked(&default_mirror->error_count);
37835 }
37836
37837 static int mirror_available(struct mirror_set *ms, struct bio *bio)
37838 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
37839 */
37840 if (likely(region_in_sync(ms, region, 1)))
37841 m = choose_mirror(ms, bio->bi_sector);
37842 - else if (m && atomic_read(&m->error_count))
37843 + else if (m && atomic_read_unchecked(&m->error_count))
37844 m = NULL;
37845
37846 if (likely(m))
37847 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
37848 }
37849
37850 ms->mirror[mirror].ms = ms;
37851 - atomic_set(&(ms->mirror[mirror].error_count), 0);
37852 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
37853 ms->mirror[mirror].error_type = 0;
37854 ms->mirror[mirror].offset = offset;
37855
37856 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
37857 */
37858 static char device_status_char(struct mirror *m)
37859 {
37860 - if (!atomic_read(&(m->error_count)))
37861 + if (!atomic_read_unchecked(&(m->error_count)))
37862 return 'A';
37863
37864 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
37865 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
37866 index bd58703..9f26571 100644
37867 --- a/drivers/md/dm-stripe.c
37868 +++ b/drivers/md/dm-stripe.c
37869 @@ -20,7 +20,7 @@ struct stripe {
37870 struct dm_dev *dev;
37871 sector_t physical_start;
37872
37873 - atomic_t error_count;
37874 + atomic_unchecked_t error_count;
37875 };
37876
37877 struct stripe_c {
37878 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
37879 kfree(sc);
37880 return r;
37881 }
37882 - atomic_set(&(sc->stripe[i].error_count), 0);
37883 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
37884 }
37885
37886 ti->private = sc;
37887 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
37888 DMEMIT("%d ", sc->stripes);
37889 for (i = 0; i < sc->stripes; i++) {
37890 DMEMIT("%s ", sc->stripe[i].dev->name);
37891 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
37892 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
37893 'D' : 'A';
37894 }
37895 buffer[i] = '\0';
37896 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
37897 */
37898 for (i = 0; i < sc->stripes; i++)
37899 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
37900 - atomic_inc(&(sc->stripe[i].error_count));
37901 - if (atomic_read(&(sc->stripe[i].error_count)) <
37902 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
37903 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
37904 DM_IO_ERROR_THRESHOLD)
37905 queue_work(kstriped, &sc->kstriped_ws);
37906 }
37907 diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
37908 index 4b04590..13a77b2 100644
37909 --- a/drivers/md/dm-sysfs.c
37910 +++ b/drivers/md/dm-sysfs.c
37911 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
37912 NULL,
37913 };
37914
37915 -static struct sysfs_ops dm_sysfs_ops = {
37916 +static const struct sysfs_ops dm_sysfs_ops = {
37917 .show = dm_attr_show,
37918 };
37919
37920 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
37921 index 03345bb..332250d 100644
37922 --- a/drivers/md/dm-table.c
37923 +++ b/drivers/md/dm-table.c
37924 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
37925 if (!dev_size)
37926 return 0;
37927
37928 - if ((start >= dev_size) || (start + len > dev_size)) {
37929 + if ((start >= dev_size) || (len > dev_size - start)) {
37930 DMWARN("%s: %s too small for target: "
37931 "start=%llu, len=%llu, dev_size=%llu",
37932 dm_device_name(ti->table->md), bdevname(bdev, b),
37933 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
37934 index c988ac2..c418141 100644
37935 --- a/drivers/md/dm.c
37936 +++ b/drivers/md/dm.c
37937 @@ -165,9 +165,9 @@ struct mapped_device {
37938 /*
37939 * Event handling.
37940 */
37941 - atomic_t event_nr;
37942 + atomic_unchecked_t event_nr;
37943 wait_queue_head_t eventq;
37944 - atomic_t uevent_seq;
37945 + atomic_unchecked_t uevent_seq;
37946 struct list_head uevent_list;
37947 spinlock_t uevent_lock; /* Protect access to uevent_list */
37948
37949 @@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
37950 rwlock_init(&md->map_lock);
37951 atomic_set(&md->holders, 1);
37952 atomic_set(&md->open_count, 0);
37953 - atomic_set(&md->event_nr, 0);
37954 - atomic_set(&md->uevent_seq, 0);
37955 + atomic_set_unchecked(&md->event_nr, 0);
37956 + atomic_set_unchecked(&md->uevent_seq, 0);
37957 INIT_LIST_HEAD(&md->uevent_list);
37958 spin_lock_init(&md->uevent_lock);
37959
37960 @@ -1927,7 +1927,7 @@ static void event_callback(void *context)
37961
37962 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
37963
37964 - atomic_inc(&md->event_nr);
37965 + atomic_inc_unchecked(&md->event_nr);
37966 wake_up(&md->eventq);
37967 }
37968
37969 @@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
37970
37971 uint32_t dm_next_uevent_seq(struct mapped_device *md)
37972 {
37973 - return atomic_add_return(1, &md->uevent_seq);
37974 + return atomic_add_return_unchecked(1, &md->uevent_seq);
37975 }
37976
37977 uint32_t dm_get_event_nr(struct mapped_device *md)
37978 {
37979 - return atomic_read(&md->event_nr);
37980 + return atomic_read_unchecked(&md->event_nr);
37981 }
37982
37983 int dm_wait_event(struct mapped_device *md, int event_nr)
37984 {
37985 return wait_event_interruptible(md->eventq,
37986 - (event_nr != atomic_read(&md->event_nr)));
37987 + (event_nr != atomic_read_unchecked(&md->event_nr)));
37988 }
37989
37990 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
37991 diff --git a/drivers/md/md.c b/drivers/md/md.c
37992 index 4ce6e2f..7a9530a 100644
37993 --- a/drivers/md/md.c
37994 +++ b/drivers/md/md.c
37995 @@ -153,10 +153,10 @@ static int start_readonly;
37996 * start build, activate spare
37997 */
37998 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
37999 -static atomic_t md_event_count;
38000 +static atomic_unchecked_t md_event_count;
38001 void md_new_event(mddev_t *mddev)
38002 {
38003 - atomic_inc(&md_event_count);
38004 + atomic_inc_unchecked(&md_event_count);
38005 wake_up(&md_event_waiters);
38006 }
38007 EXPORT_SYMBOL_GPL(md_new_event);
38008 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
38009 */
38010 static void md_new_event_inintr(mddev_t *mddev)
38011 {
38012 - atomic_inc(&md_event_count);
38013 + atomic_inc_unchecked(&md_event_count);
38014 wake_up(&md_event_waiters);
38015 }
38016
38017 @@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
38018
38019 rdev->preferred_minor = 0xffff;
38020 rdev->data_offset = le64_to_cpu(sb->data_offset);
38021 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38022 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38023
38024 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
38025 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
38026 @@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
38027 else
38028 sb->resync_offset = cpu_to_le64(0);
38029
38030 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
38031 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
38032
38033 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
38034 sb->size = cpu_to_le64(mddev->dev_sectors);
38035 @@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
38036 static ssize_t
38037 errors_show(mdk_rdev_t *rdev, char *page)
38038 {
38039 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
38040 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
38041 }
38042
38043 static ssize_t
38044 @@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
38045 char *e;
38046 unsigned long n = simple_strtoul(buf, &e, 10);
38047 if (*buf && (*e == 0 || *e == '\n')) {
38048 - atomic_set(&rdev->corrected_errors, n);
38049 + atomic_set_unchecked(&rdev->corrected_errors, n);
38050 return len;
38051 }
38052 return -EINVAL;
38053 @@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
38054 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
38055 kfree(rdev);
38056 }
38057 -static struct sysfs_ops rdev_sysfs_ops = {
38058 +static const struct sysfs_ops rdev_sysfs_ops = {
38059 .show = rdev_attr_show,
38060 .store = rdev_attr_store,
38061 };
38062 @@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
38063 rdev->data_offset = 0;
38064 rdev->sb_events = 0;
38065 atomic_set(&rdev->nr_pending, 0);
38066 - atomic_set(&rdev->read_errors, 0);
38067 - atomic_set(&rdev->corrected_errors, 0);
38068 + atomic_set_unchecked(&rdev->read_errors, 0);
38069 + atomic_set_unchecked(&rdev->corrected_errors, 0);
38070
38071 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
38072 if (!size) {
38073 @@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
38074 kfree(mddev);
38075 }
38076
38077 -static struct sysfs_ops md_sysfs_ops = {
38078 +static const struct sysfs_ops md_sysfs_ops = {
38079 .show = md_attr_show,
38080 .store = md_attr_store,
38081 };
38082 @@ -4482,7 +4482,8 @@ out:
38083 err = 0;
38084 blk_integrity_unregister(disk);
38085 md_new_event(mddev);
38086 - sysfs_notify_dirent(mddev->sysfs_state);
38087 + if (mddev->sysfs_state)
38088 + sysfs_notify_dirent(mddev->sysfs_state);
38089 return err;
38090 }
38091
38092 @@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
38093
38094 spin_unlock(&pers_lock);
38095 seq_printf(seq, "\n");
38096 - mi->event = atomic_read(&md_event_count);
38097 + mi->event = atomic_read_unchecked(&md_event_count);
38098 return 0;
38099 }
38100 if (v == (void*)2) {
38101 @@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
38102 chunk_kb ? "KB" : "B");
38103 if (bitmap->file) {
38104 seq_printf(seq, ", file: ");
38105 - seq_path(seq, &bitmap->file->f_path, " \t\n");
38106 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
38107 }
38108
38109 seq_printf(seq, "\n");
38110 @@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
38111 else {
38112 struct seq_file *p = file->private_data;
38113 p->private = mi;
38114 - mi->event = atomic_read(&md_event_count);
38115 + mi->event = atomic_read_unchecked(&md_event_count);
38116 }
38117 return error;
38118 }
38119 @@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
38120 /* always allow read */
38121 mask = POLLIN | POLLRDNORM;
38122
38123 - if (mi->event != atomic_read(&md_event_count))
38124 + if (mi->event != atomic_read_unchecked(&md_event_count))
38125 mask |= POLLERR | POLLPRI;
38126 return mask;
38127 }
38128 @@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
38129 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
38130 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38131 (int)part_stat_read(&disk->part0, sectors[1]) -
38132 - atomic_read(&disk->sync_io);
38133 + atomic_read_unchecked(&disk->sync_io);
38134 /* sync IO will cause sync_io to increase before the disk_stats
38135 * as sync_io is counted when a request starts, and
38136 * disk_stats is counted when it completes.
38137 diff --git a/drivers/md/md.h b/drivers/md/md.h
38138 index 87430fe..0024a4c 100644
38139 --- a/drivers/md/md.h
38140 +++ b/drivers/md/md.h
38141 @@ -94,10 +94,10 @@ struct mdk_rdev_s
38142 * only maintained for arrays that
38143 * support hot removal
38144 */
38145 - atomic_t read_errors; /* number of consecutive read errors that
38146 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
38147 * we have tried to ignore.
38148 */
38149 - atomic_t corrected_errors; /* number of corrected read errors,
38150 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
38151 * for reporting to userspace and storing
38152 * in superblock.
38153 */
38154 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
38155
38156 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
38157 {
38158 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38159 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38160 }
38161
38162 struct mdk_personality
38163 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
38164 index 968cb14..f0ad2e4 100644
38165 --- a/drivers/md/raid1.c
38166 +++ b/drivers/md/raid1.c
38167 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
38168 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
38169 continue;
38170 rdev = conf->mirrors[d].rdev;
38171 - atomic_add(s, &rdev->corrected_errors);
38172 + atomic_add_unchecked(s, &rdev->corrected_errors);
38173 if (sync_page_io(rdev->bdev,
38174 sect + rdev->data_offset,
38175 s<<9,
38176 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
38177 /* Well, this device is dead */
38178 md_error(mddev, rdev);
38179 else {
38180 - atomic_add(s, &rdev->corrected_errors);
38181 + atomic_add_unchecked(s, &rdev->corrected_errors);
38182 printk(KERN_INFO
38183 "raid1:%s: read error corrected "
38184 "(%d sectors at %llu on %s)\n",
38185 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
38186 index 1b4e232..cf0f534b 100644
38187 --- a/drivers/md/raid10.c
38188 +++ b/drivers/md/raid10.c
38189 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
38190 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
38191 set_bit(R10BIO_Uptodate, &r10_bio->state);
38192 else {
38193 - atomic_add(r10_bio->sectors,
38194 + atomic_add_unchecked(r10_bio->sectors,
38195 &conf->mirrors[d].rdev->corrected_errors);
38196 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
38197 md_error(r10_bio->mddev,
38198 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
38199 test_bit(In_sync, &rdev->flags)) {
38200 atomic_inc(&rdev->nr_pending);
38201 rcu_read_unlock();
38202 - atomic_add(s, &rdev->corrected_errors);
38203 + atomic_add_unchecked(s, &rdev->corrected_errors);
38204 if (sync_page_io(rdev->bdev,
38205 r10_bio->devs[sl].addr +
38206 sect + rdev->data_offset,
38207 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
38208 index 883215d..675bf47 100644
38209 --- a/drivers/md/raid5.c
38210 +++ b/drivers/md/raid5.c
38211 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
38212 bi->bi_next = NULL;
38213 if ((rw & WRITE) &&
38214 test_bit(R5_ReWrite, &sh->dev[i].flags))
38215 - atomic_add(STRIPE_SECTORS,
38216 + atomic_add_unchecked(STRIPE_SECTORS,
38217 &rdev->corrected_errors);
38218 generic_make_request(bi);
38219 } else {
38220 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
38221 clear_bit(R5_ReadError, &sh->dev[i].flags);
38222 clear_bit(R5_ReWrite, &sh->dev[i].flags);
38223 }
38224 - if (atomic_read(&conf->disks[i].rdev->read_errors))
38225 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
38226 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
38227 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
38228 } else {
38229 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
38230 int retry = 0;
38231 rdev = conf->disks[i].rdev;
38232
38233 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
38234 - atomic_inc(&rdev->read_errors);
38235 + atomic_inc_unchecked(&rdev->read_errors);
38236 if (conf->mddev->degraded >= conf->max_degraded)
38237 printk_rl(KERN_WARNING
38238 "raid5:%s: read error not correctable "
38239 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
38240 (unsigned long long)(sh->sector
38241 + rdev->data_offset),
38242 bdn);
38243 - else if (atomic_read(&rdev->read_errors)
38244 + else if (atomic_read_unchecked(&rdev->read_errors)
38245 > conf->max_nr_stripes)
38246 printk(KERN_WARNING
38247 "raid5:%s: Too many read errors, failing device %s.\n",
38248 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
38249 sector_t r_sector;
38250 struct stripe_head sh2;
38251
38252 + pax_track_stack();
38253
38254 chunk_offset = sector_div(new_sector, sectors_per_chunk);
38255 stripe = new_sector;
38256 diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
38257 index 05bde9c..2f31d40 100644
38258 --- a/drivers/media/common/saa7146_hlp.c
38259 +++ b/drivers/media/common/saa7146_hlp.c
38260 @@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
38261
38262 int x[32], y[32], w[32], h[32];
38263
38264 + pax_track_stack();
38265 +
38266 /* clear out memory */
38267 memset(&line_list[0], 0x00, sizeof(u32)*32);
38268 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
38269 diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
38270 index cb22da5..82b686e 100644
38271 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
38272 +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
38273 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
38274 u8 buf[HOST_LINK_BUF_SIZE];
38275 int i;
38276
38277 + pax_track_stack();
38278 +
38279 dprintk("%s\n", __func__);
38280
38281 /* check if we have space for a link buf in the rx_buffer */
38282 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
38283 unsigned long timeout;
38284 int written;
38285
38286 + pax_track_stack();
38287 +
38288 dprintk("%s\n", __func__);
38289
38290 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
38291 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
38292 index 2fe05d0..a3289c4 100644
38293 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
38294 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
38295 @@ -71,7 +71,7 @@ struct dvb_demux_feed {
38296 union {
38297 dmx_ts_cb ts;
38298 dmx_section_cb sec;
38299 - } cb;
38300 + } __no_const cb;
38301
38302 struct dvb_demux *demux;
38303 void *priv;
38304 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
38305 index 94159b9..376bd8e 100644
38306 --- a/drivers/media/dvb/dvb-core/dvbdev.c
38307 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
38308 @@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
38309 const struct dvb_device *template, void *priv, int type)
38310 {
38311 struct dvb_device *dvbdev;
38312 - struct file_operations *dvbdevfops;
38313 + file_operations_no_const *dvbdevfops;
38314 struct device *clsdev;
38315 int minor;
38316 int id;
38317 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
38318 index 2a53dd0..db8c07a 100644
38319 --- a/drivers/media/dvb/dvb-usb/cxusb.c
38320 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
38321 @@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
38322 struct dib0700_adapter_state {
38323 int (*set_param_save) (struct dvb_frontend *,
38324 struct dvb_frontend_parameters *);
38325 -};
38326 +} __no_const;
38327
38328 static int dib7070_set_param_override(struct dvb_frontend *fe,
38329 struct dvb_frontend_parameters *fep)
38330 diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
38331 index db7f7f7..f55e96f 100644
38332 --- a/drivers/media/dvb/dvb-usb/dib0700_core.c
38333 +++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
38334 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
38335
38336 u8 buf[260];
38337
38338 + pax_track_stack();
38339 +
38340 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
38341 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
38342
38343 diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
38344 index 524acf5..5ffc403 100644
38345 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
38346 +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
38347 @@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
38348
38349 struct dib0700_adapter_state {
38350 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
38351 -};
38352 +} __no_const;
38353
38354 /* Hauppauge Nova-T 500 (aka Bristol)
38355 * has a LNA on GPIO0 which is enabled by setting 1 */
38356 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
38357 index ba91735..4261d84 100644
38358 --- a/drivers/media/dvb/frontends/dib3000.h
38359 +++ b/drivers/media/dvb/frontends/dib3000.h
38360 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
38361 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
38362 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
38363 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
38364 -};
38365 +} __no_const;
38366
38367 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
38368 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
38369 diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
38370 index c709ce6..b3fe620 100644
38371 --- a/drivers/media/dvb/frontends/or51211.c
38372 +++ b/drivers/media/dvb/frontends/or51211.c
38373 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
38374 u8 tudata[585];
38375 int i;
38376
38377 + pax_track_stack();
38378 +
38379 dprintk("Firmware is %zd bytes\n",fw->size);
38380
38381 /* Get eprom data */
38382 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
38383 index 482d0f3..ee1e202 100644
38384 --- a/drivers/media/radio/radio-cadet.c
38385 +++ b/drivers/media/radio/radio-cadet.c
38386 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
38387 while (i < count && dev->rdsin != dev->rdsout)
38388 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
38389
38390 - if (copy_to_user(data, readbuf, i))
38391 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
38392 return -EFAULT;
38393 return i;
38394 }
38395 diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
38396 index 6dd51e2..0359b92 100644
38397 --- a/drivers/media/video/cx18/cx18-driver.c
38398 +++ b/drivers/media/video/cx18/cx18-driver.c
38399 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
38400
38401 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
38402
38403 -static atomic_t cx18_instance = ATOMIC_INIT(0);
38404 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
38405
38406 /* Parameter declarations */
38407 static int cardtype[CX18_MAX_CARDS];
38408 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
38409 struct i2c_client c;
38410 u8 eedata[256];
38411
38412 + pax_track_stack();
38413 +
38414 memset(&c, 0, sizeof(c));
38415 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
38416 c.adapter = &cx->i2c_adap[0];
38417 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
38418 struct cx18 *cx;
38419
38420 /* FIXME - module parameter arrays constrain max instances */
38421 - i = atomic_inc_return(&cx18_instance) - 1;
38422 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
38423 if (i >= CX18_MAX_CARDS) {
38424 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
38425 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
38426 diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
38427 index 463ec34..2f4625a 100644
38428 --- a/drivers/media/video/ivtv/ivtv-driver.c
38429 +++ b/drivers/media/video/ivtv/ivtv-driver.c
38430 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
38431 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
38432
38433 /* ivtv instance counter */
38434 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
38435 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
38436
38437 /* Parameter declarations */
38438 static int cardtype[IVTV_MAX_CARDS];
38439 diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
38440 index 5fc4ac0..652a54a 100644
38441 --- a/drivers/media/video/omap24xxcam.c
38442 +++ b/drivers/media/video/omap24xxcam.c
38443 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
38444 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
38445
38446 do_gettimeofday(&vb->ts);
38447 - vb->field_count = atomic_add_return(2, &fh->field_count);
38448 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
38449 if (csr & csr_error) {
38450 vb->state = VIDEOBUF_ERROR;
38451 if (!atomic_read(&fh->cam->in_reset)) {
38452 diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
38453 index 2ce67f5..cf26a5b 100644
38454 --- a/drivers/media/video/omap24xxcam.h
38455 +++ b/drivers/media/video/omap24xxcam.h
38456 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
38457 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
38458 struct videobuf_queue vbq;
38459 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
38460 - atomic_t field_count; /* field counter for videobuf_buffer */
38461 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
38462 /* accessing cam here doesn't need serialisation: it's constant */
38463 struct omap24xxcam_device *cam;
38464 };
38465 diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
38466 index 299afa4..eb47459 100644
38467 --- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
38468 +++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
38469 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
38470 u8 *eeprom;
38471 struct tveeprom tvdata;
38472
38473 + pax_track_stack();
38474 +
38475 memset(&tvdata,0,sizeof(tvdata));
38476
38477 eeprom = pvr2_eeprom_fetch(hdw);
38478 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
38479 index 5b152ff..3320638 100644
38480 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
38481 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
38482 @@ -195,7 +195,7 @@ struct pvr2_hdw {
38483
38484 /* I2C stuff */
38485 struct i2c_adapter i2c_adap;
38486 - struct i2c_algorithm i2c_algo;
38487 + i2c_algorithm_no_const i2c_algo;
38488 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
38489 int i2c_cx25840_hack_state;
38490 int i2c_linked;
38491 diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
38492 index 1eabff6..8e2313a 100644
38493 --- a/drivers/media/video/saa7134/saa6752hs.c
38494 +++ b/drivers/media/video/saa7134/saa6752hs.c
38495 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
38496 unsigned char localPAT[256];
38497 unsigned char localPMT[256];
38498
38499 + pax_track_stack();
38500 +
38501 /* Set video format - must be done first as it resets other settings */
38502 set_reg8(client, 0x41, h->video_format);
38503
38504 diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
38505 index 9c1d3ac..b1b49e9 100644
38506 --- a/drivers/media/video/saa7164/saa7164-cmd.c
38507 +++ b/drivers/media/video/saa7164/saa7164-cmd.c
38508 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
38509 wait_queue_head_t *q = 0;
38510 dprintk(DBGLVL_CMD, "%s()\n", __func__);
38511
38512 + pax_track_stack();
38513 +
38514 /* While any outstand message on the bus exists... */
38515 do {
38516
38517 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
38518 u8 tmp[512];
38519 dprintk(DBGLVL_CMD, "%s()\n", __func__);
38520
38521 + pax_track_stack();
38522 +
38523 while (loop) {
38524
38525 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
38526 diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
38527 index b085496..cde0270 100644
38528 --- a/drivers/media/video/usbvideo/ibmcam.c
38529 +++ b/drivers/media/video/usbvideo/ibmcam.c
38530 @@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
38531 static int __init ibmcam_init(void)
38532 {
38533 struct usbvideo_cb cbTbl;
38534 - memset(&cbTbl, 0, sizeof(cbTbl));
38535 - cbTbl.probe = ibmcam_probe;
38536 - cbTbl.setupOnOpen = ibmcam_setup_on_open;
38537 - cbTbl.videoStart = ibmcam_video_start;
38538 - cbTbl.videoStop = ibmcam_video_stop;
38539 - cbTbl.processData = ibmcam_ProcessIsocData;
38540 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38541 - cbTbl.adjustPicture = ibmcam_adjust_picture;
38542 - cbTbl.getFPS = ibmcam_calculate_fps;
38543 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
38544 + *(void **)&cbTbl.probe = ibmcam_probe;
38545 + *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
38546 + *(void **)&cbTbl.videoStart = ibmcam_video_start;
38547 + *(void **)&cbTbl.videoStop = ibmcam_video_stop;
38548 + *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
38549 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38550 + *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
38551 + *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
38552 return usbvideo_register(
38553 &cams,
38554 MAX_IBMCAM,
38555 diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
38556 index 31d57f2..600b735 100644
38557 --- a/drivers/media/video/usbvideo/konicawc.c
38558 +++ b/drivers/media/video/usbvideo/konicawc.c
38559 @@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
38560 int error;
38561
38562 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
38563 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38564 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38565
38566 cam->input = input_dev = input_allocate_device();
38567 if (!input_dev) {
38568 @@ -935,16 +935,16 @@ static int __init konicawc_init(void)
38569 struct usbvideo_cb cbTbl;
38570 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
38571 DRIVER_DESC "\n");
38572 - memset(&cbTbl, 0, sizeof(cbTbl));
38573 - cbTbl.probe = konicawc_probe;
38574 - cbTbl.setupOnOpen = konicawc_setup_on_open;
38575 - cbTbl.processData = konicawc_process_isoc;
38576 - cbTbl.getFPS = konicawc_calculate_fps;
38577 - cbTbl.setVideoMode = konicawc_set_video_mode;
38578 - cbTbl.startDataPump = konicawc_start_data;
38579 - cbTbl.stopDataPump = konicawc_stop_data;
38580 - cbTbl.adjustPicture = konicawc_adjust_picture;
38581 - cbTbl.userFree = konicawc_free_uvd;
38582 + memset((void * )&cbTbl, 0, sizeof(cbTbl));
38583 + *(void **)&cbTbl.probe = konicawc_probe;
38584 + *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
38585 + *(void **)&cbTbl.processData = konicawc_process_isoc;
38586 + *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
38587 + *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
38588 + *(void **)&cbTbl.startDataPump = konicawc_start_data;
38589 + *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
38590 + *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
38591 + *(void **)&cbTbl.userFree = konicawc_free_uvd;
38592 return usbvideo_register(
38593 &cams,
38594 MAX_CAMERAS,
38595 diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
38596 index 803d3e4..c4d1b96 100644
38597 --- a/drivers/media/video/usbvideo/quickcam_messenger.c
38598 +++ b/drivers/media/video/usbvideo/quickcam_messenger.c
38599 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
38600 int error;
38601
38602 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
38603 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38604 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38605
38606 cam->input = input_dev = input_allocate_device();
38607 if (!input_dev) {
38608 diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
38609 index fbd1b63..292f9f0 100644
38610 --- a/drivers/media/video/usbvideo/ultracam.c
38611 +++ b/drivers/media/video/usbvideo/ultracam.c
38612 @@ -655,14 +655,14 @@ static int __init ultracam_init(void)
38613 {
38614 struct usbvideo_cb cbTbl;
38615 memset(&cbTbl, 0, sizeof(cbTbl));
38616 - cbTbl.probe = ultracam_probe;
38617 - cbTbl.setupOnOpen = ultracam_setup_on_open;
38618 - cbTbl.videoStart = ultracam_video_start;
38619 - cbTbl.videoStop = ultracam_video_stop;
38620 - cbTbl.processData = ultracam_ProcessIsocData;
38621 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38622 - cbTbl.adjustPicture = ultracam_adjust_picture;
38623 - cbTbl.getFPS = ultracam_calculate_fps;
38624 + *(void **)&cbTbl.probe = ultracam_probe;
38625 + *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
38626 + *(void **)&cbTbl.videoStart = ultracam_video_start;
38627 + *(void **)&cbTbl.videoStop = ultracam_video_stop;
38628 + *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
38629 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38630 + *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
38631 + *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
38632 return usbvideo_register(
38633 &cams,
38634 MAX_CAMERAS,
38635 diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
38636 index dea8b32..34f6878 100644
38637 --- a/drivers/media/video/usbvideo/usbvideo.c
38638 +++ b/drivers/media/video/usbvideo/usbvideo.c
38639 @@ -697,15 +697,15 @@ int usbvideo_register(
38640 __func__, cams, base_size, num_cams);
38641
38642 /* Copy callbacks, apply defaults for those that are not set */
38643 - memmove(&cams->cb, cbTbl, sizeof(cams->cb));
38644 + memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
38645 if (cams->cb.getFrame == NULL)
38646 - cams->cb.getFrame = usbvideo_GetFrame;
38647 + *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
38648 if (cams->cb.disconnect == NULL)
38649 - cams->cb.disconnect = usbvideo_Disconnect;
38650 + *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
38651 if (cams->cb.startDataPump == NULL)
38652 - cams->cb.startDataPump = usbvideo_StartDataPump;
38653 + *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
38654 if (cams->cb.stopDataPump == NULL)
38655 - cams->cb.stopDataPump = usbvideo_StopDataPump;
38656 + *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
38657
38658 cams->num_cameras = num_cams;
38659 cams->cam = (struct uvd *) &cams[1];
38660 diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
38661 index c66985b..7fa143a 100644
38662 --- a/drivers/media/video/usbvideo/usbvideo.h
38663 +++ b/drivers/media/video/usbvideo/usbvideo.h
38664 @@ -268,7 +268,7 @@ struct usbvideo_cb {
38665 int (*startDataPump)(struct uvd *uvd);
38666 void (*stopDataPump)(struct uvd *uvd);
38667 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
38668 -};
38669 +} __no_const;
38670
38671 struct usbvideo {
38672 int num_cameras; /* As allocated */
38673 diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
38674 index e0f91e4..37554ea 100644
38675 --- a/drivers/media/video/usbvision/usbvision-core.c
38676 +++ b/drivers/media/video/usbvision/usbvision-core.c
38677 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
38678 unsigned char rv, gv, bv;
38679 static unsigned char *Y, *U, *V;
38680
38681 + pax_track_stack();
38682 +
38683 frame = usbvision->curFrame;
38684 imageSize = frame->frmwidth * frame->frmheight;
38685 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
38686 diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
38687 index 0d06e7c..3d17d24 100644
38688 --- a/drivers/media/video/v4l2-device.c
38689 +++ b/drivers/media/video/v4l2-device.c
38690 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
38691 EXPORT_SYMBOL_GPL(v4l2_device_register);
38692
38693 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
38694 - atomic_t *instance)
38695 + atomic_unchecked_t *instance)
38696 {
38697 - int num = atomic_inc_return(instance) - 1;
38698 + int num = atomic_inc_return_unchecked(instance) - 1;
38699 int len = strlen(basename);
38700
38701 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
38702 diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
38703 index 032ebae..4ebd8e8 100644
38704 --- a/drivers/media/video/videobuf-dma-sg.c
38705 +++ b/drivers/media/video/videobuf-dma-sg.c
38706 @@ -631,6 +631,9 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
38707
38708 static int __videobuf_copy_to_user ( struct videobuf_queue *q,
38709 char __user *data, size_t count,
38710 + int nonblocking ) __size_overflow(3);
38711 +static int __videobuf_copy_to_user ( struct videobuf_queue *q,
38712 + char __user *data, size_t count,
38713 int nonblocking )
38714 {
38715 struct videobuf_dma_sg_memory *mem = q->read_buf->priv;
38716 @@ -693,6 +696,8 @@ void *videobuf_sg_alloc(size_t size)
38717 {
38718 struct videobuf_queue q;
38719
38720 + pax_track_stack();
38721 +
38722 /* Required to make generic handler to call __videobuf_alloc */
38723 q.int_ops = &sg_ops;
38724
38725 diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
38726 index 35f3900..aa7c2f1 100644
38727 --- a/drivers/media/video/videobuf-vmalloc.c
38728 +++ b/drivers/media/video/videobuf-vmalloc.c
38729 @@ -330,6 +330,9 @@ error:
38730
38731 static int __videobuf_copy_to_user ( struct videobuf_queue *q,
38732 char __user *data, size_t count,
38733 + int nonblocking ) __size_overflow(3);
38734 +static int __videobuf_copy_to_user ( struct videobuf_queue *q,
38735 + char __user *data, size_t count,
38736 int nonblocking )
38737 {
38738 struct videobuf_vmalloc_memory *mem=q->read_buf->priv;
38739 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
38740 index b6992b7..9fa7547 100644
38741 --- a/drivers/message/fusion/mptbase.c
38742 +++ b/drivers/message/fusion/mptbase.c
38743 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
38744 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
38745 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
38746
38747 +#ifdef CONFIG_GRKERNSEC_HIDESYM
38748 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38749 + NULL, NULL);
38750 +#else
38751 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38752 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
38753 +#endif
38754 +
38755 /*
38756 * Rounding UP to nearest 4-kB boundary here...
38757 */
38758 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
38759 index 83873e3..e360e9a 100644
38760 --- a/drivers/message/fusion/mptsas.c
38761 +++ b/drivers/message/fusion/mptsas.c
38762 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
38763 return 0;
38764 }
38765
38766 +static inline void
38767 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38768 +{
38769 + if (phy_info->port_details) {
38770 + phy_info->port_details->rphy = rphy;
38771 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38772 + ioc->name, rphy));
38773 + }
38774 +
38775 + if (rphy) {
38776 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38777 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38778 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38779 + ioc->name, rphy, rphy->dev.release));
38780 + }
38781 +}
38782 +
38783 /* no mutex */
38784 static void
38785 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
38786 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
38787 return NULL;
38788 }
38789
38790 -static inline void
38791 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38792 -{
38793 - if (phy_info->port_details) {
38794 - phy_info->port_details->rphy = rphy;
38795 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38796 - ioc->name, rphy));
38797 - }
38798 -
38799 - if (rphy) {
38800 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38801 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38802 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38803 - ioc->name, rphy, rphy->dev.release));
38804 - }
38805 -}
38806 -
38807 static inline struct sas_port *
38808 mptsas_get_port(struct mptsas_phyinfo *phy_info)
38809 {
38810 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
38811 index bd096ca..332cf76 100644
38812 --- a/drivers/message/fusion/mptscsih.c
38813 +++ b/drivers/message/fusion/mptscsih.c
38814 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
38815
38816 h = shost_priv(SChost);
38817
38818 - if (h) {
38819 - if (h->info_kbuf == NULL)
38820 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38821 - return h->info_kbuf;
38822 - h->info_kbuf[0] = '\0';
38823 + if (!h)
38824 + return NULL;
38825
38826 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38827 - h->info_kbuf[size-1] = '\0';
38828 - }
38829 + if (h->info_kbuf == NULL)
38830 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38831 + return h->info_kbuf;
38832 + h->info_kbuf[0] = '\0';
38833 +
38834 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38835 + h->info_kbuf[size-1] = '\0';
38836
38837 return h->info_kbuf;
38838 }
38839 diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
38840 index efba702..59b2c0f 100644
38841 --- a/drivers/message/i2o/i2o_config.c
38842 +++ b/drivers/message/i2o/i2o_config.c
38843 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
38844 struct i2o_message *msg;
38845 unsigned int iop;
38846
38847 + pax_track_stack();
38848 +
38849 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
38850 return -EFAULT;
38851
38852 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
38853 index 7045c45..c07b170 100644
38854 --- a/drivers/message/i2o/i2o_proc.c
38855 +++ b/drivers/message/i2o/i2o_proc.c
38856 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
38857 "Array Controller Device"
38858 };
38859
38860 -static char *chtostr(u8 * chars, int n)
38861 -{
38862 - char tmp[256];
38863 - tmp[0] = 0;
38864 - return strncat(tmp, (char *)chars, n);
38865 -}
38866 -
38867 static int i2o_report_query_status(struct seq_file *seq, int block_status,
38868 char *group)
38869 {
38870 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38871
38872 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
38873 seq_printf(seq, "%-#8x", ddm_table.module_id);
38874 - seq_printf(seq, "%-29s",
38875 - chtostr(ddm_table.module_name_version, 28));
38876 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
38877 seq_printf(seq, "%9d ", ddm_table.data_size);
38878 seq_printf(seq, "%8d", ddm_table.code_size);
38879
38880 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38881
38882 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
38883 seq_printf(seq, "%-#8x", dst->module_id);
38884 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
38885 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
38886 + seq_printf(seq, "%-.28s", dst->module_name_version);
38887 + seq_printf(seq, "%-.8s", dst->date);
38888 seq_printf(seq, "%8d ", dst->module_size);
38889 seq_printf(seq, "%8d ", dst->mpb_size);
38890 seq_printf(seq, "0x%04x", dst->module_flags);
38891 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38892 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
38893 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
38894 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
38895 - seq_printf(seq, "Vendor info : %s\n",
38896 - chtostr((u8 *) (work32 + 2), 16));
38897 - seq_printf(seq, "Product info : %s\n",
38898 - chtostr((u8 *) (work32 + 6), 16));
38899 - seq_printf(seq, "Description : %s\n",
38900 - chtostr((u8 *) (work32 + 10), 16));
38901 - seq_printf(seq, "Product rev. : %s\n",
38902 - chtostr((u8 *) (work32 + 14), 8));
38903 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
38904 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
38905 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
38906 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
38907
38908 seq_printf(seq, "Serial number : ");
38909 print_serial_number(seq, (u8 *) (work32 + 16),
38910 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38911 }
38912
38913 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
38914 - seq_printf(seq, "Module name : %s\n",
38915 - chtostr(result.module_name, 24));
38916 - seq_printf(seq, "Module revision : %s\n",
38917 - chtostr(result.module_rev, 8));
38918 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
38919 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
38920
38921 seq_printf(seq, "Serial number : ");
38922 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
38923 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
38924 return 0;
38925 }
38926
38927 - seq_printf(seq, "Device name : %s\n",
38928 - chtostr(result.device_name, 64));
38929 - seq_printf(seq, "Service name : %s\n",
38930 - chtostr(result.service_name, 64));
38931 - seq_printf(seq, "Physical name : %s\n",
38932 - chtostr(result.physical_location, 64));
38933 - seq_printf(seq, "Instance number : %s\n",
38934 - chtostr(result.instance_number, 4));
38935 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
38936 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
38937 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
38938 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
38939
38940 return 0;
38941 }
38942 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
38943 index 27cf4af..b1205b8 100644
38944 --- a/drivers/message/i2o/iop.c
38945 +++ b/drivers/message/i2o/iop.c
38946 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
38947
38948 spin_lock_irqsave(&c->context_list_lock, flags);
38949
38950 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
38951 - atomic_inc(&c->context_list_counter);
38952 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
38953 + atomic_inc_unchecked(&c->context_list_counter);
38954
38955 - entry->context = atomic_read(&c->context_list_counter);
38956 + entry->context = atomic_read_unchecked(&c->context_list_counter);
38957
38958 list_add(&entry->list, &c->context_list);
38959
38960 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
38961
38962 #if BITS_PER_LONG == 64
38963 spin_lock_init(&c->context_list_lock);
38964 - atomic_set(&c->context_list_counter, 0);
38965 + atomic_set_unchecked(&c->context_list_counter, 0);
38966 INIT_LIST_HEAD(&c->context_list);
38967 #endif
38968
38969 diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
38970 index 78e3e85..66c9a0d 100644
38971 --- a/drivers/mfd/ab3100-core.c
38972 +++ b/drivers/mfd/ab3100-core.c
38973 @@ -777,7 +777,7 @@ struct ab_family_id {
38974 char *name;
38975 };
38976
38977 -static const struct ab_family_id ids[] __initdata = {
38978 +static const struct ab_family_id ids[] __initconst = {
38979 /* AB3100 */
38980 {
38981 .id = 0xc0,
38982 diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
38983 index 8d8c932..8104515 100644
38984 --- a/drivers/mfd/wm8350-i2c.c
38985 +++ b/drivers/mfd/wm8350-i2c.c
38986 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
38987 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
38988 int ret;
38989
38990 + pax_track_stack();
38991 +
38992 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
38993 return -EINVAL;
38994
38995 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
38996 index e4ff50b..4cc3f04 100644
38997 --- a/drivers/misc/kgdbts.c
38998 +++ b/drivers/misc/kgdbts.c
38999 @@ -118,7 +118,7 @@
39000 } while (0)
39001 #define MAX_CONFIG_LEN 40
39002
39003 -static struct kgdb_io kgdbts_io_ops;
39004 +static const struct kgdb_io kgdbts_io_ops;
39005 static char get_buf[BUFMAX];
39006 static int get_buf_cnt;
39007 static char put_buf[BUFMAX];
39008 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
39009 module_put(THIS_MODULE);
39010 }
39011
39012 -static struct kgdb_io kgdbts_io_ops = {
39013 +static const struct kgdb_io kgdbts_io_ops = {
39014 .name = "kgdbts",
39015 .read_char = kgdbts_get_char,
39016 .write_char = kgdbts_put_char,
39017 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
39018 index 37e7cfc..67cfb76 100644
39019 --- a/drivers/misc/sgi-gru/gruhandles.c
39020 +++ b/drivers/misc/sgi-gru/gruhandles.c
39021 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39022
39023 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
39024 {
39025 - atomic_long_inc(&mcs_op_statistics[op].count);
39026 - atomic_long_add(clks, &mcs_op_statistics[op].total);
39027 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
39028 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
39029 if (mcs_op_statistics[op].max < clks)
39030 mcs_op_statistics[op].max = clks;
39031 }
39032 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
39033 index 3f2375c..467c6e6 100644
39034 --- a/drivers/misc/sgi-gru/gruprocfs.c
39035 +++ b/drivers/misc/sgi-gru/gruprocfs.c
39036 @@ -32,9 +32,9 @@
39037
39038 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
39039
39040 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
39041 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
39042 {
39043 - unsigned long val = atomic_long_read(v);
39044 + unsigned long val = atomic_long_read_unchecked(v);
39045
39046 if (val)
39047 seq_printf(s, "%16lu %s\n", val, id);
39048 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
39049 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
39050
39051 for (op = 0; op < mcsop_last; op++) {
39052 - count = atomic_long_read(&mcs_op_statistics[op].count);
39053 - total = atomic_long_read(&mcs_op_statistics[op].total);
39054 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
39055 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
39056 max = mcs_op_statistics[op].max;
39057 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
39058 count ? total / count : 0, max);
39059 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
39060 index 46990bc..4a251b5 100644
39061 --- a/drivers/misc/sgi-gru/grutables.h
39062 +++ b/drivers/misc/sgi-gru/grutables.h
39063 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
39064 * GRU statistics.
39065 */
39066 struct gru_stats_s {
39067 - atomic_long_t vdata_alloc;
39068 - atomic_long_t vdata_free;
39069 - atomic_long_t gts_alloc;
39070 - atomic_long_t gts_free;
39071 - atomic_long_t vdata_double_alloc;
39072 - atomic_long_t gts_double_allocate;
39073 - atomic_long_t assign_context;
39074 - atomic_long_t assign_context_failed;
39075 - atomic_long_t free_context;
39076 - atomic_long_t load_user_context;
39077 - atomic_long_t load_kernel_context;
39078 - atomic_long_t lock_kernel_context;
39079 - atomic_long_t unlock_kernel_context;
39080 - atomic_long_t steal_user_context;
39081 - atomic_long_t steal_kernel_context;
39082 - atomic_long_t steal_context_failed;
39083 - atomic_long_t nopfn;
39084 - atomic_long_t break_cow;
39085 - atomic_long_t asid_new;
39086 - atomic_long_t asid_next;
39087 - atomic_long_t asid_wrap;
39088 - atomic_long_t asid_reuse;
39089 - atomic_long_t intr;
39090 - atomic_long_t intr_mm_lock_failed;
39091 - atomic_long_t call_os;
39092 - atomic_long_t call_os_offnode_reference;
39093 - atomic_long_t call_os_check_for_bug;
39094 - atomic_long_t call_os_wait_queue;
39095 - atomic_long_t user_flush_tlb;
39096 - atomic_long_t user_unload_context;
39097 - atomic_long_t user_exception;
39098 - atomic_long_t set_context_option;
39099 - atomic_long_t migrate_check;
39100 - atomic_long_t migrated_retarget;
39101 - atomic_long_t migrated_unload;
39102 - atomic_long_t migrated_unload_delay;
39103 - atomic_long_t migrated_nopfn_retarget;
39104 - atomic_long_t migrated_nopfn_unload;
39105 - atomic_long_t tlb_dropin;
39106 - atomic_long_t tlb_dropin_fail_no_asid;
39107 - atomic_long_t tlb_dropin_fail_upm;
39108 - atomic_long_t tlb_dropin_fail_invalid;
39109 - atomic_long_t tlb_dropin_fail_range_active;
39110 - atomic_long_t tlb_dropin_fail_idle;
39111 - atomic_long_t tlb_dropin_fail_fmm;
39112 - atomic_long_t tlb_dropin_fail_no_exception;
39113 - atomic_long_t tlb_dropin_fail_no_exception_war;
39114 - atomic_long_t tfh_stale_on_fault;
39115 - atomic_long_t mmu_invalidate_range;
39116 - atomic_long_t mmu_invalidate_page;
39117 - atomic_long_t mmu_clear_flush_young;
39118 - atomic_long_t flush_tlb;
39119 - atomic_long_t flush_tlb_gru;
39120 - atomic_long_t flush_tlb_gru_tgh;
39121 - atomic_long_t flush_tlb_gru_zero_asid;
39122 + atomic_long_unchecked_t vdata_alloc;
39123 + atomic_long_unchecked_t vdata_free;
39124 + atomic_long_unchecked_t gts_alloc;
39125 + atomic_long_unchecked_t gts_free;
39126 + atomic_long_unchecked_t vdata_double_alloc;
39127 + atomic_long_unchecked_t gts_double_allocate;
39128 + atomic_long_unchecked_t assign_context;
39129 + atomic_long_unchecked_t assign_context_failed;
39130 + atomic_long_unchecked_t free_context;
39131 + atomic_long_unchecked_t load_user_context;
39132 + atomic_long_unchecked_t load_kernel_context;
39133 + atomic_long_unchecked_t lock_kernel_context;
39134 + atomic_long_unchecked_t unlock_kernel_context;
39135 + atomic_long_unchecked_t steal_user_context;
39136 + atomic_long_unchecked_t steal_kernel_context;
39137 + atomic_long_unchecked_t steal_context_failed;
39138 + atomic_long_unchecked_t nopfn;
39139 + atomic_long_unchecked_t break_cow;
39140 + atomic_long_unchecked_t asid_new;
39141 + atomic_long_unchecked_t asid_next;
39142 + atomic_long_unchecked_t asid_wrap;
39143 + atomic_long_unchecked_t asid_reuse;
39144 + atomic_long_unchecked_t intr;
39145 + atomic_long_unchecked_t intr_mm_lock_failed;
39146 + atomic_long_unchecked_t call_os;
39147 + atomic_long_unchecked_t call_os_offnode_reference;
39148 + atomic_long_unchecked_t call_os_check_for_bug;
39149 + atomic_long_unchecked_t call_os_wait_queue;
39150 + atomic_long_unchecked_t user_flush_tlb;
39151 + atomic_long_unchecked_t user_unload_context;
39152 + atomic_long_unchecked_t user_exception;
39153 + atomic_long_unchecked_t set_context_option;
39154 + atomic_long_unchecked_t migrate_check;
39155 + atomic_long_unchecked_t migrated_retarget;
39156 + atomic_long_unchecked_t migrated_unload;
39157 + atomic_long_unchecked_t migrated_unload_delay;
39158 + atomic_long_unchecked_t migrated_nopfn_retarget;
39159 + atomic_long_unchecked_t migrated_nopfn_unload;
39160 + atomic_long_unchecked_t tlb_dropin;
39161 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
39162 + atomic_long_unchecked_t tlb_dropin_fail_upm;
39163 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
39164 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
39165 + atomic_long_unchecked_t tlb_dropin_fail_idle;
39166 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
39167 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
39168 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
39169 + atomic_long_unchecked_t tfh_stale_on_fault;
39170 + atomic_long_unchecked_t mmu_invalidate_range;
39171 + atomic_long_unchecked_t mmu_invalidate_page;
39172 + atomic_long_unchecked_t mmu_clear_flush_young;
39173 + atomic_long_unchecked_t flush_tlb;
39174 + atomic_long_unchecked_t flush_tlb_gru;
39175 + atomic_long_unchecked_t flush_tlb_gru_tgh;
39176 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
39177
39178 - atomic_long_t copy_gpa;
39179 + atomic_long_unchecked_t copy_gpa;
39180
39181 - atomic_long_t mesq_receive;
39182 - atomic_long_t mesq_receive_none;
39183 - atomic_long_t mesq_send;
39184 - atomic_long_t mesq_send_failed;
39185 - atomic_long_t mesq_noop;
39186 - atomic_long_t mesq_send_unexpected_error;
39187 - atomic_long_t mesq_send_lb_overflow;
39188 - atomic_long_t mesq_send_qlimit_reached;
39189 - atomic_long_t mesq_send_amo_nacked;
39190 - atomic_long_t mesq_send_put_nacked;
39191 - atomic_long_t mesq_qf_not_full;
39192 - atomic_long_t mesq_qf_locked;
39193 - atomic_long_t mesq_qf_noop_not_full;
39194 - atomic_long_t mesq_qf_switch_head_failed;
39195 - atomic_long_t mesq_qf_unexpected_error;
39196 - atomic_long_t mesq_noop_unexpected_error;
39197 - atomic_long_t mesq_noop_lb_overflow;
39198 - atomic_long_t mesq_noop_qlimit_reached;
39199 - atomic_long_t mesq_noop_amo_nacked;
39200 - atomic_long_t mesq_noop_put_nacked;
39201 + atomic_long_unchecked_t mesq_receive;
39202 + atomic_long_unchecked_t mesq_receive_none;
39203 + atomic_long_unchecked_t mesq_send;
39204 + atomic_long_unchecked_t mesq_send_failed;
39205 + atomic_long_unchecked_t mesq_noop;
39206 + atomic_long_unchecked_t mesq_send_unexpected_error;
39207 + atomic_long_unchecked_t mesq_send_lb_overflow;
39208 + atomic_long_unchecked_t mesq_send_qlimit_reached;
39209 + atomic_long_unchecked_t mesq_send_amo_nacked;
39210 + atomic_long_unchecked_t mesq_send_put_nacked;
39211 + atomic_long_unchecked_t mesq_qf_not_full;
39212 + atomic_long_unchecked_t mesq_qf_locked;
39213 + atomic_long_unchecked_t mesq_qf_noop_not_full;
39214 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
39215 + atomic_long_unchecked_t mesq_qf_unexpected_error;
39216 + atomic_long_unchecked_t mesq_noop_unexpected_error;
39217 + atomic_long_unchecked_t mesq_noop_lb_overflow;
39218 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
39219 + atomic_long_unchecked_t mesq_noop_amo_nacked;
39220 + atomic_long_unchecked_t mesq_noop_put_nacked;
39221
39222 };
39223
39224 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
39225 cchop_deallocate, tghop_invalidate, mcsop_last};
39226
39227 struct mcs_op_statistic {
39228 - atomic_long_t count;
39229 - atomic_long_t total;
39230 + atomic_long_unchecked_t count;
39231 + atomic_long_unchecked_t total;
39232 unsigned long max;
39233 };
39234
39235 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39236
39237 #define STAT(id) do { \
39238 if (gru_options & OPT_STATS) \
39239 - atomic_long_inc(&gru_stats.id); \
39240 + atomic_long_inc_unchecked(&gru_stats.id); \
39241 } while (0)
39242
39243 #ifdef CONFIG_SGI_GRU_DEBUG
39244 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
39245 index 2275126..12a9dbfb 100644
39246 --- a/drivers/misc/sgi-xp/xp.h
39247 +++ b/drivers/misc/sgi-xp/xp.h
39248 @@ -289,7 +289,7 @@ struct xpc_interface {
39249 xpc_notify_func, void *);
39250 void (*received) (short, int, void *);
39251 enum xp_retval (*partid_to_nasids) (short, void *);
39252 -};
39253 +} __no_const;
39254
39255 extern struct xpc_interface xpc_interface;
39256
39257 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
39258 index b94d5f7..7f494c5 100644
39259 --- a/drivers/misc/sgi-xp/xpc.h
39260 +++ b/drivers/misc/sgi-xp/xpc.h
39261 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
39262 void (*received_payload) (struct xpc_channel *, void *);
39263 void (*notify_senders_of_disconnect) (struct xpc_channel *);
39264 };
39265 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
39266
39267 /* struct xpc_partition act_state values (for XPC HB) */
39268
39269 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
39270 /* found in xpc_main.c */
39271 extern struct device *xpc_part;
39272 extern struct device *xpc_chan;
39273 -extern struct xpc_arch_operations xpc_arch_ops;
39274 +extern xpc_arch_operations_no_const xpc_arch_ops;
39275 extern int xpc_disengage_timelimit;
39276 extern int xpc_disengage_timedout;
39277 extern int xpc_activate_IRQ_rcvd;
39278 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
39279 index fd3688a..7e211a4 100644
39280 --- a/drivers/misc/sgi-xp/xpc_main.c
39281 +++ b/drivers/misc/sgi-xp/xpc_main.c
39282 @@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
39283 .notifier_call = xpc_system_die,
39284 };
39285
39286 -struct xpc_arch_operations xpc_arch_ops;
39287 +xpc_arch_operations_no_const xpc_arch_ops;
39288
39289 /*
39290 * Timer function to enforce the timelimit on the partition disengage.
39291 diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
39292 index 8b70e03..700bda6 100644
39293 --- a/drivers/misc/sgi-xp/xpc_sn2.c
39294 +++ b/drivers/misc/sgi-xp/xpc_sn2.c
39295 @@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
39296 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
39297 }
39298
39299 -static struct xpc_arch_operations xpc_arch_ops_sn2 = {
39300 +static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
39301 .setup_partitions = xpc_setup_partitions_sn2,
39302 .teardown_partitions = xpc_teardown_partitions_sn2,
39303 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
39304 @@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
39305 int ret;
39306 size_t buf_size;
39307
39308 - xpc_arch_ops = xpc_arch_ops_sn2;
39309 + pax_open_kernel();
39310 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
39311 + pax_close_kernel();
39312
39313 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
39314 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
39315 diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
39316 index 8e08d71..7cb8c9b 100644
39317 --- a/drivers/misc/sgi-xp/xpc_uv.c
39318 +++ b/drivers/misc/sgi-xp/xpc_uv.c
39319 @@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
39320 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
39321 }
39322
39323 -static struct xpc_arch_operations xpc_arch_ops_uv = {
39324 +static const struct xpc_arch_operations xpc_arch_ops_uv = {
39325 .setup_partitions = xpc_setup_partitions_uv,
39326 .teardown_partitions = xpc_teardown_partitions_uv,
39327 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
39328 @@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
39329 int
39330 xpc_init_uv(void)
39331 {
39332 - xpc_arch_ops = xpc_arch_ops_uv;
39333 + pax_open_kernel();
39334 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
39335 + pax_close_kernel();
39336
39337 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
39338 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
39339 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
39340 index 6fd20b42..650efe3 100644
39341 --- a/drivers/mmc/host/sdhci-pci.c
39342 +++ b/drivers/mmc/host/sdhci-pci.c
39343 @@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
39344 .probe = via_probe,
39345 };
39346
39347 -static const struct pci_device_id pci_ids[] __devinitdata = {
39348 +static const struct pci_device_id pci_ids[] __devinitconst = {
39349 {
39350 .vendor = PCI_VENDOR_ID_RICOH,
39351 .device = PCI_DEVICE_ID_RICOH_R5C822,
39352 diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
39353 index e7563a9..5f90ce5 100644
39354 --- a/drivers/mtd/chips/cfi_cmdset_0001.c
39355 +++ b/drivers/mtd/chips/cfi_cmdset_0001.c
39356 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
39357 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
39358 unsigned long timeo = jiffies + HZ;
39359
39360 + pax_track_stack();
39361 +
39362 /* Prevent setting state FL_SYNCING for chip in suspended state. */
39363 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
39364 goto sleep;
39365 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
39366 unsigned long initial_adr;
39367 int initial_len = len;
39368
39369 + pax_track_stack();
39370 +
39371 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
39372 adr += chip->start;
39373 initial_adr = adr;
39374 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
39375 int retries = 3;
39376 int ret;
39377
39378 + pax_track_stack();
39379 +
39380 adr += chip->start;
39381
39382 retry:
39383 diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
39384 index 0667a67..3ab97ed 100644
39385 --- a/drivers/mtd/chips/cfi_cmdset_0020.c
39386 +++ b/drivers/mtd/chips/cfi_cmdset_0020.c
39387 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
39388 unsigned long cmd_addr;
39389 struct cfi_private *cfi = map->fldrv_priv;
39390
39391 + pax_track_stack();
39392 +
39393 adr += chip->start;
39394
39395 /* Ensure cmd read/writes are aligned. */
39396 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
39397 DECLARE_WAITQUEUE(wait, current);
39398 int wbufsize, z;
39399
39400 + pax_track_stack();
39401 +
39402 /* M58LW064A requires bus alignment for buffer wriets -- saw */
39403 if (adr & (map_bankwidth(map)-1))
39404 return -EINVAL;
39405 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
39406 DECLARE_WAITQUEUE(wait, current);
39407 int ret = 0;
39408
39409 + pax_track_stack();
39410 +
39411 adr += chip->start;
39412
39413 /* Let's determine this according to the interleave only once */
39414 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
39415 unsigned long timeo = jiffies + HZ;
39416 DECLARE_WAITQUEUE(wait, current);
39417
39418 + pax_track_stack();
39419 +
39420 adr += chip->start;
39421
39422 /* Let's determine this according to the interleave only once */
39423 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
39424 unsigned long timeo = jiffies + HZ;
39425 DECLARE_WAITQUEUE(wait, current);
39426
39427 + pax_track_stack();
39428 +
39429 adr += chip->start;
39430
39431 /* Let's determine this according to the interleave only once */
39432 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
39433 index 5bf5f46..c5de373 100644
39434 --- a/drivers/mtd/devices/doc2000.c
39435 +++ b/drivers/mtd/devices/doc2000.c
39436 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
39437
39438 /* The ECC will not be calculated correctly if less than 512 is written */
39439 /* DBB-
39440 - if (len != 0x200 && eccbuf)
39441 + if (len != 0x200)
39442 printk(KERN_WARNING
39443 "ECC needs a full sector write (adr: %lx size %lx)\n",
39444 (long) to, (long) len);
39445 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
39446 index 0990f78..bb4e8a4 100644
39447 --- a/drivers/mtd/devices/doc2001.c
39448 +++ b/drivers/mtd/devices/doc2001.c
39449 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
39450 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
39451
39452 /* Don't allow read past end of device */
39453 - if (from >= this->totlen)
39454 + if (from >= this->totlen || !len)
39455 return -EINVAL;
39456
39457 /* Don't allow a single read to cross a 512-byte block boundary */
39458 diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
39459 index e56d6b4..f07e6cf 100644
39460 --- a/drivers/mtd/ftl.c
39461 +++ b/drivers/mtd/ftl.c
39462 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
39463 loff_t offset;
39464 uint16_t srcunitswap = cpu_to_le16(srcunit);
39465
39466 + pax_track_stack();
39467 +
39468 eun = &part->EUNInfo[srcunit];
39469 xfer = &part->XferInfo[xferunit];
39470 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
39471 diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
39472 index 8aca552..146446e 100755
39473 --- a/drivers/mtd/inftlcore.c
39474 +++ b/drivers/mtd/inftlcore.c
39475 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
39476 struct inftl_oob oob;
39477 size_t retlen;
39478
39479 + pax_track_stack();
39480 +
39481 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
39482 "pending=%d)\n", inftl, thisVUC, pendingblock);
39483
39484 diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
39485 index 32e82ae..ed50953 100644
39486 --- a/drivers/mtd/inftlmount.c
39487 +++ b/drivers/mtd/inftlmount.c
39488 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
39489 struct INFTLPartition *ip;
39490 size_t retlen;
39491
39492 + pax_track_stack();
39493 +
39494 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
39495
39496 /*
39497 diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
39498 index 79bf40f..fe5f8fd 100644
39499 --- a/drivers/mtd/lpddr/qinfo_probe.c
39500 +++ b/drivers/mtd/lpddr/qinfo_probe.c
39501 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
39502 {
39503 map_word pfow_val[4];
39504
39505 + pax_track_stack();
39506 +
39507 /* Check identification string */
39508 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
39509 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
39510 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
39511 index 726a1b8..f46b460 100644
39512 --- a/drivers/mtd/mtdchar.c
39513 +++ b/drivers/mtd/mtdchar.c
39514 @@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
39515 u_long size;
39516 struct mtd_info_user info;
39517
39518 + pax_track_stack();
39519 +
39520 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
39521
39522 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
39523 diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
39524 index 1002e18..26d82d5 100644
39525 --- a/drivers/mtd/nftlcore.c
39526 +++ b/drivers/mtd/nftlcore.c
39527 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
39528 int inplace = 1;
39529 size_t retlen;
39530
39531 + pax_track_stack();
39532 +
39533 memset(BlockMap, 0xff, sizeof(BlockMap));
39534 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
39535
39536 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
39537 index 8b22b18..6fada85 100644
39538 --- a/drivers/mtd/nftlmount.c
39539 +++ b/drivers/mtd/nftlmount.c
39540 @@ -23,6 +23,7 @@
39541 #include <asm/errno.h>
39542 #include <linux/delay.h>
39543 #include <linux/slab.h>
39544 +#include <linux/sched.h>
39545 #include <linux/mtd/mtd.h>
39546 #include <linux/mtd/nand.h>
39547 #include <linux/mtd/nftl.h>
39548 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
39549 struct mtd_info *mtd = nftl->mbd.mtd;
39550 unsigned int i;
39551
39552 + pax_track_stack();
39553 +
39554 /* Assume logical EraseSize == physical erasesize for starting the scan.
39555 We'll sort it out later if we find a MediaHeader which says otherwise */
39556 /* Actually, we won't. The new DiskOnChip driver has already scanned
39557 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
39558 index 14cec04..09d8519 100644
39559 --- a/drivers/mtd/ubi/build.c
39560 +++ b/drivers/mtd/ubi/build.c
39561 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
39562 static int __init bytes_str_to_int(const char *str)
39563 {
39564 char *endp;
39565 - unsigned long result;
39566 + unsigned long result, scale = 1;
39567
39568 result = simple_strtoul(str, &endp, 0);
39569 if (str == endp || result >= INT_MAX) {
39570 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
39571
39572 switch (*endp) {
39573 case 'G':
39574 - result *= 1024;
39575 + scale *= 1024;
39576 case 'M':
39577 - result *= 1024;
39578 + scale *= 1024;
39579 case 'K':
39580 - result *= 1024;
39581 + scale *= 1024;
39582 if (endp[1] == 'i' && endp[2] == 'B')
39583 endp += 2;
39584 case '\0':
39585 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
39586 return -EINVAL;
39587 }
39588
39589 - return result;
39590 + if (result*scale >= INT_MAX) {
39591 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
39592 + str);
39593 + return -EINVAL;
39594 + }
39595 +
39596 + return result*scale;
39597 }
39598
39599 /**
39600 diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
39601 index ab68886..ca405e8 100644
39602 --- a/drivers/net/atlx/atl2.c
39603 +++ b/drivers/net/atlx/atl2.c
39604 @@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
39605 */
39606
39607 #define ATL2_PARAM(X, desc) \
39608 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
39609 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
39610 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
39611 MODULE_PARM_DESC(X, desc);
39612 #else
39613 diff --git a/drivers/net/benet/Makefile b/drivers/net/benet/Makefile
39614 index a60cd80..0ed11ef 100644
39615 --- a/drivers/net/benet/Makefile
39616 +++ b/drivers/net/benet/Makefile
39617 @@ -1,7 +1,9 @@
39618 #
39619 -# Makefile to build the network driver for ServerEngine's BladeEngine.
39620 +# Makefile to build the be2net network driver
39621 #
39622
39623 +EXTRA_CFLAGS += -DCONFIG_PALAU
39624 +
39625 obj-$(CONFIG_BE2NET) += be2net.o
39626
39627 -be2net-y := be_main.o be_cmds.o be_ethtool.o
39628 +be2net-y := be_main.o be_cmds.o be_ethtool.o be_compat.o be_misc.o
39629 diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
39630 index 5c74ff0..7382603 100644
39631 --- a/drivers/net/benet/be.h
39632 +++ b/drivers/net/benet/be.h
39633 @@ -1,18 +1,18 @@
39634 /*
39635 - * Copyright (C) 2005 - 2009 ServerEngines
39636 + * Copyright (C) 2005 - 2011 Emulex
39637 * All rights reserved.
39638 *
39639 * This program is free software; you can redistribute it and/or
39640 * modify it under the terms of the GNU General Public License version 2
39641 - * as published by the Free Software Foundation. The full GNU General
39642 + * as published by the Free Software Foundation. The full GNU General
39643 * Public License is included in this distribution in the file called COPYING.
39644 *
39645 * Contact Information:
39646 - * linux-drivers@serverengines.com
39647 + * linux-drivers@emulex.com
39648 *
39649 - * ServerEngines
39650 - * 209 N. Fair Oaks Ave
39651 - * Sunnyvale, CA 94085
39652 + * Emulex
39653 + * 3333 Susan Street
39654 + * Costa Mesa, CA 92626
39655 */
39656
39657 #ifndef BE_H
39658 @@ -29,32 +29,53 @@
39659 #include <linux/workqueue.h>
39660 #include <linux/interrupt.h>
39661 #include <linux/firmware.h>
39662 +#include <linux/jhash.h>
39663 +#ifndef CONFIG_PALAU
39664 +#include <linux/inet_lro.h>
39665 +#endif
39666
39667 +#ifdef CONFIG_PALAU
39668 +#include "be_compat.h"
39669 +#endif
39670 #include "be_hw.h"
39671
39672 -#define DRV_VER "2.101.205"
39673 +#ifdef CONFIG_PALAU
39674 +#include "version.h"
39675 +#define DRV_VER STR_BE_MAJOR "." STR_BE_MINOR "."\
39676 + STR_BE_BUILD "." STR_BE_BRANCH
39677 +#else
39678 +#define DRV_VER "2.0.348"
39679 +#endif
39680 #define DRV_NAME "be2net"
39681 -#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
39682 -#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
39683 -#define OC_NAME "Emulex OneConnect 10Gbps NIC"
39684 -#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
39685 -#define DRV_DESC BE_NAME "Driver"
39686 +#define BE_NAME "Emulex BladeEngine2"
39687 +#define BE3_NAME "Emulex BladeEngine3"
39688 +#define OC_NAME "Emulex OneConnect"
39689 +#define OC_NAME_BE OC_NAME "(be3)"
39690 +#define OC_NAME_LANCER OC_NAME "(Lancer)"
39691 +#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver"
39692
39693 -#define BE_VENDOR_ID 0x19a2
39694 +#define BE_VENDOR_ID 0x19a2
39695 +#define EMULEX_VENDOR_ID 0x10df
39696 #define BE_DEVICE_ID1 0x211
39697 #define BE_DEVICE_ID2 0x221
39698 -#define OC_DEVICE_ID1 0x700
39699 -#define OC_DEVICE_ID2 0x701
39700 -#define OC_DEVICE_ID3 0x710
39701 +#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
39702 +#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
39703 +#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
39704 +
39705 +#define OC_SUBSYS_DEVICE_ID1 0xE602
39706 +#define OC_SUBSYS_DEVICE_ID2 0xE642
39707 +#define OC_SUBSYS_DEVICE_ID3 0xE612
39708 +#define OC_SUBSYS_DEVICE_ID4 0xE652
39709
39710 static inline char *nic_name(struct pci_dev *pdev)
39711 {
39712 switch (pdev->device) {
39713 case OC_DEVICE_ID1:
39714 - case OC_DEVICE_ID2:
39715 return OC_NAME;
39716 + case OC_DEVICE_ID2:
39717 + return OC_NAME_BE;
39718 case OC_DEVICE_ID3:
39719 - return OC_NAME1;
39720 + return OC_NAME_LANCER;
39721 case BE_DEVICE_ID2:
39722 return BE3_NAME;
39723 default:
39724 @@ -63,7 +84,7 @@ static inline char *nic_name(struct pci_dev *pdev)
39725 }
39726
39727 /* Number of bytes of an RX frame that are copied to skb->data */
39728 -#define BE_HDR_LEN 64
39729 +#define BE_HDR_LEN ((u16) 64)
39730 #define BE_MAX_JUMBO_FRAME_SIZE 9018
39731 #define BE_MIN_MTU 256
39732
39733 @@ -79,10 +100,24 @@ static inline char *nic_name(struct pci_dev *pdev)
39734 #define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
39735 #define MCC_CQ_LEN 256
39736
39737 +#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
39738 +
39739 +#define MAX_RX_QS (MAX_RSS_QS + 1)
39740 +
39741 +#ifdef MQ_TX
39742 +#define MAX_TX_QS 8
39743 +#else
39744 +#define MAX_TX_QS 1
39745 +#endif
39746 +
39747 +#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RSS qs + 1 def Rx + Tx */
39748 #define BE_NAPI_WEIGHT 64
39749 -#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
39750 +#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
39751 #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
39752
39753 +#define BE_MAX_LRO_DESCRIPTORS 16
39754 +#define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
39755 +
39756 #define FW_VER_LEN 32
39757
39758 struct be_dma_mem {
39759 @@ -127,6 +162,11 @@ static inline void *queue_tail_node(struct be_queue_info *q)
39760 return q->dma_mem.va + q->tail * q->entry_size;
39761 }
39762
39763 +static inline void *queue_index_node(struct be_queue_info *q, u16 index)
39764 +{
39765 + return q->dma_mem.va + index * q->entry_size;
39766 +}
39767 +
39768 static inline void queue_head_inc(struct be_queue_info *q)
39769 {
39770 index_inc(&q->head, q->len);
39771 @@ -137,6 +177,7 @@ static inline void queue_tail_inc(struct be_queue_info *q)
39772 index_inc(&q->tail, q->len);
39773 }
39774
39775 +
39776 struct be_eq_obj {
39777 struct be_queue_info q;
39778 char desc[32];
39779 @@ -146,6 +187,7 @@ struct be_eq_obj {
39780 u16 min_eqd; /* in usecs */
39781 u16 max_eqd; /* in usecs */
39782 u16 cur_eqd; /* in usecs */
39783 + u8 eq_idx;
39784
39785 struct napi_struct napi;
39786 };
39787 @@ -153,49 +195,20 @@ struct be_eq_obj {
39788 struct be_mcc_obj {
39789 struct be_queue_info q;
39790 struct be_queue_info cq;
39791 + bool rearm_cq;
39792 };
39793
39794 -struct be_drvr_stats {
39795 +struct be_tx_stats {
39796 u32 be_tx_reqs; /* number of TX requests initiated */
39797 u32 be_tx_stops; /* number of times TX Q was stopped */
39798 - u32 be_fwd_reqs; /* number of send reqs through forwarding i/f */
39799 u32 be_tx_wrbs; /* number of tx WRBs used */
39800 - u32 be_tx_events; /* number of tx completion events */
39801 u32 be_tx_compl; /* number of tx completion entries processed */
39802 ulong be_tx_jiffies;
39803 u64 be_tx_bytes;
39804 u64 be_tx_bytes_prev;
39805 u64 be_tx_pkts;
39806 u32 be_tx_rate;
39807 -
39808 - u32 cache_barrier[16];
39809 -
39810 - u32 be_ethrx_post_fail;/* number of ethrx buffer alloc failures */
39811 - u32 be_polls; /* number of times NAPI called poll function */
39812 - u32 be_rx_events; /* number of ucast rx completion events */
39813 - u32 be_rx_compl; /* number of rx completion entries processed */
39814 - ulong be_rx_jiffies;
39815 - u64 be_rx_bytes;
39816 - u64 be_rx_bytes_prev;
39817 - u64 be_rx_pkts;
39818 - u32 be_rx_rate;
39819 - /* number of non ether type II frames dropped where
39820 - * frame len > length field of Mac Hdr */
39821 - u32 be_802_3_dropped_frames;
39822 - /* number of non ether type II frames malformed where
39823 - * in frame len < length field of Mac Hdr */
39824 - u32 be_802_3_malformed_frames;
39825 - u32 be_rxcp_err; /* Num rx completion entries w/ err set. */
39826 - ulong rx_fps_jiffies; /* jiffies at last FPS calc */
39827 - u32 be_rx_frags;
39828 - u32 be_prev_rx_frags;
39829 - u32 be_rx_fps; /* Rx frags per second */
39830 -};
39831 -
39832 -struct be_stats_obj {
39833 - struct be_drvr_stats drvr_stats;
39834 - struct net_device_stats net_stats;
39835 - struct be_dma_mem cmd;
39836 + u32 be_ipv6_ext_hdr_tx_drop;
39837 };
39838
39839 struct be_tx_obj {
39840 @@ -203,23 +216,124 @@ struct be_tx_obj {
39841 struct be_queue_info cq;
39842 /* Remember the skbs that were transmitted */
39843 struct sk_buff *sent_skb_list[TX_Q_LEN];
39844 + struct be_tx_stats stats;
39845 };
39846
39847 /* Struct to remember the pages posted for rx frags */
39848 struct be_rx_page_info {
39849 struct page *page;
39850 - dma_addr_t bus;
39851 + DEFINE_DMA_UNMAP_ADDR(bus);
39852 u16 page_offset;
39853 bool last_page_user;
39854 };
39855
39856 +struct be_rx_stats {
39857 + u32 rx_post_fail;/* number of ethrx buffer alloc failures */
39858 + u32 rx_polls; /* number of times NAPI called poll function */
39859 + u32 rx_events; /* number of ucast rx completion events */
39860 + u32 rx_compl; /* number of rx completion entries processed */
39861 + ulong rx_jiffies;
39862 + u64 rx_bytes;
39863 + u64 rx_bytes_prev;
39864 + u64 rx_pkts;
39865 + u32 rx_rate;
39866 + u32 rx_mcast_pkts;
39867 + u32 rxcp_err; /* Num rx completion entries w/ err set. */
39868 + ulong rx_fps_jiffies; /* jiffies at last FPS calc */
39869 + u32 rx_frags;
39870 + u32 prev_rx_frags;
39871 + u32 rx_fps; /* Rx frags per second */
39872 + u32 rx_drops_no_frags;
39873 +};
39874 +
39875 +struct be_rx_compl_info {
39876 + u32 rss_hash;
39877 + u16 vlan_tag;
39878 + u16 pkt_size;
39879 + u16 rxq_idx;
39880 + u16 port;
39881 + u8 vlanf;
39882 + u8 num_rcvd;
39883 + u8 err;
39884 + u8 ipf;
39885 + u8 tcpf;
39886 + u8 udpf;
39887 + u8 ip_csum;
39888 + u8 l4_csum;
39889 + u8 ipv6;
39890 + u8 vtm;
39891 + u8 pkt_type;
39892 +};
39893 +
39894 struct be_rx_obj {
39895 + struct be_adapter *adapter;
39896 struct be_queue_info q;
39897 struct be_queue_info cq;
39898 - struct be_rx_page_info page_info_tbl[RX_Q_LEN];
39899 + struct be_rx_compl_info rxcp;
39900 + struct be_rx_page_info *page_info_tbl;
39901 + struct net_lro_mgr lro_mgr;
39902 + struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS];
39903 + struct be_eq_obj rx_eq;
39904 + struct be_rx_stats stats;
39905 + u8 rss_id;
39906 + bool rx_post_starved; /* Zero rx frags have been posted to BE */
39907 + u16 prev_frag_idx;
39908 + u32 cache_line_barrier[16];
39909 };
39910
39911 -#define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */
39912 +struct be_drv_stats {
39913 + u32 be_on_die_temperature;
39914 + u32 be_tx_events;
39915 + u32 eth_red_drops;
39916 + u32 rx_drops_no_pbuf;
39917 + u32 rx_drops_no_txpb;
39918 + u32 rx_drops_no_erx_descr;
39919 + u32 rx_drops_no_tpre_descr;
39920 + u32 rx_drops_too_many_frags;
39921 + u32 rx_drops_invalid_ring;
39922 + u32 forwarded_packets;
39923 + u32 rx_drops_mtu;
39924 + u32 rx_crc_errors;
39925 + u32 rx_alignment_symbol_errors;
39926 + u32 rx_pause_frames;
39927 + u32 rx_priority_pause_frames;
39928 + u32 rx_control_frames;
39929 + u32 rx_in_range_errors;
39930 + u32 rx_out_range_errors;
39931 + u32 rx_frame_too_long;
39932 + u32 rx_address_match_errors;
39933 + u32 rx_dropped_too_small;
39934 + u32 rx_dropped_too_short;
39935 + u32 rx_dropped_header_too_small;
39936 + u32 rx_dropped_tcp_length;
39937 + u32 rx_dropped_runt;
39938 + u32 rx_ip_checksum_errs;
39939 + u32 rx_tcp_checksum_errs;
39940 + u32 rx_udp_checksum_errs;
39941 + u32 rx_switched_unicast_packets;
39942 + u32 rx_switched_multicast_packets;
39943 + u32 rx_switched_broadcast_packets;
39944 + u32 tx_pauseframes;
39945 + u32 tx_priority_pauseframes;
39946 + u32 tx_controlframes;
39947 + u32 rxpp_fifo_overflow_drop;
39948 + u32 rx_input_fifo_overflow_drop;
39949 + u32 pmem_fifo_overflow_drop;
39950 + u32 jabber_events;
39951 +};
39952 +
39953 +struct be_vf_cfg {
39954 + unsigned char vf_mac_addr[ETH_ALEN];
39955 + u32 vf_if_handle;
39956 + u32 vf_pmac_id;
39957 + u16 vf_def_vid;
39958 + u16 vf_vlan_tag;
39959 + u32 vf_tx_rate;
39960 +};
39961 +
39962 +#define BE_INVALID_PMAC_ID 0xffffffff
39963 +#define BE_FLAGS_DCBX (1 << 16)
39964 +
39965 struct be_adapter {
39966 struct pci_dev *pdev;
39967 struct net_device *netdev;
39968 @@ -228,7 +342,7 @@ struct be_adapter {
39969 u8 __iomem *db; /* Door Bell */
39970 u8 __iomem *pcicfg; /* PCI config space */
39971
39972 - spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
39973 + struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
39974 struct be_dma_mem mbox_mem;
39975 /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
39976 * is stored for freeing purpose */
39977 @@ -238,66 +352,121 @@ struct be_adapter {
39978 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
39979 spinlock_t mcc_cq_lock;
39980
39981 - struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS];
39982 - bool msix_enabled;
39983 + struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
39984 + u32 num_msix_vec;
39985 bool isr_registered;
39986
39987 /* TX Rings */
39988 struct be_eq_obj tx_eq;
39989 - struct be_tx_obj tx_obj;
39990 + struct be_tx_obj tx_obj[MAX_TX_QS];
39991 + u8 num_tx_qs;
39992 + u8 prio_tc_map[MAX_TX_QS]; /* prio_tc_map[prio] => tc-id */
39993 + u8 tc_txq_map[MAX_TX_QS]; /* tc_txq_map[tc-id] => txq index */
39994
39995 u32 cache_line_break[8];
39996
39997 /* Rx rings */
39998 - struct be_eq_obj rx_eq;
39999 - struct be_rx_obj rx_obj;
40000 + struct be_rx_obj rx_obj[MAX_RX_QS]; /* one default non-rss Q */
40001 + u32 num_rx_qs;
40002 +
40003 + struct be_dma_mem stats_cmd;
40004 + struct net_device_stats net_stats;
40005 + struct be_drv_stats drv_stats;
40006 u32 big_page_size; /* Compounded page size shared by rx wrbs */
40007 - bool rx_post_starved; /* Zero rx frags have been posted to BE */
40008
40009 struct vlan_group *vlan_grp;
40010 - u16 num_vlans;
40011 + u16 vlans_added;
40012 + u16 max_vlans; /* Number of vlans supported */
40013 u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
40014 + u8 vlan_prio_bmap; /* Available priority BitMap */
40015 + u16 recommended_prio; /* Recommended Priority */
40016 + struct be_dma_mem rx_filter;
40017
40018 - struct be_stats_obj stats;
40019 /* Work queue used to perform periodic tasks like getting statistics */
40020 struct delayed_work work;
40021 + u16 work_counter;
40022
40023 - /* Ethtool knobs and info */
40024 - bool rx_csum; /* BE card must perform rx-checksumming */
40025 + u32 flags;
40026 + bool rx_csum; /* BE card must perform rx-checksumming */
40027 + u32 max_rx_coal;
40028 char fw_ver[FW_VER_LEN];
40029 u32 if_handle; /* Used to configure filtering */
40030 u32 pmac_id; /* MAC addr handle used by BE card */
40031 + u32 beacon_state; /* for set_phys_id */
40032
40033 - bool link_up;
40034 + bool eeh_err;
40035 + int link_status;
40036 u32 port_num;
40037 + u32 hba_port_num;
40038 bool promiscuous;
40039 - u32 cap;
40040 + bool wol;
40041 + u32 function_mode;
40042 + u32 function_caps;
40043 u32 rx_fc; /* Rx flow control */
40044 u32 tx_fc; /* Tx flow control */
40045 + bool ue_detected;
40046 + bool stats_cmd_sent;
40047 + bool gro_supported;
40048 + int link_speed;
40049 + u8 port_type;
40050 + u8 transceiver;
40051 + u8 autoneg;
40052 u8 generation; /* BladeEngine ASIC generation */
40053 + u32 flash_status;
40054 + struct completion flash_compl;
40055 +
40056 + u8 eq_next_idx;
40057 + bool be3_native;
40058 + u16 num_vfs;
40059 + struct be_vf_cfg *vf_cfg;
40060 + u8 is_virtfn;
40061 + u16 pvid;
40062 + u32 sli_family;
40063 + u8 port_name[4];
40064 + char model_number[32];
40065 };
40066
40067 /* BladeEngine Generation numbers */
40068 #define BE_GEN2 2
40069 #define BE_GEN3 3
40070
40071 -extern const struct ethtool_ops be_ethtool_ops;
40072 +#define ON 1
40073 +#define OFF 0
40074 +#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3)
40075 +#define lancer_A0_chip(adapter) \
40076 + (adapter->sli_family == LANCER_A0_SLI_FAMILY)
40077
40078 -#define drvr_stats(adapter) (&adapter->stats.drvr_stats)
40079 +extern struct ethtool_ops be_ethtool_ops;
40080
40081 -static inline unsigned int be_pci_func(struct be_adapter *adapter)
40082 -{
40083 - return PCI_FUNC(adapter->pdev->devfn);
40084 -}
40085 +#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
40086 +#define tx_stats(txo) (&txo->stats)
40087 +#define rx_stats(rxo) (&rxo->stats)
40088
40089 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
40090 +#define BE_SET_NETDEV_OPS(netdev, ops) be_netdev_ops_init(netdev, ops)
40091 +#else
40092 #define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
40093 +#endif
40094 +
40095 +#define for_all_rx_queues(adapter, rxo, i) \
40096 + for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
40097 + i++, rxo++)
40098 +
40099 +/* Just skip the first default non-rss queue */
40100 +#define for_all_rss_queues(adapter, rxo, i) \
40101 + for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
40102 + i++, rxo++)
40103 +
40104 +#define for_all_tx_queues(adapter, txo, i) \
40105 + for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \
40106 + i++, txo++)
40107
40108 #define PAGE_SHIFT_4K 12
40109 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
40110
40111 /* Returns number of pages spanned by the data starting at the given addr */
40112 -#define PAGES_4K_SPANNED(_address, size) \
40113 - ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
40114 +#define PAGES_4K_SPANNED(_address, size) \
40115 + ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
40116 (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
40117
40118 /* Byte offset into the page corresponding to given address */
40119 @@ -305,7 +474,7 @@ static inline unsigned int be_pci_func(struct be_adapter *adapter)
40120 ((size_t)(addr) & (PAGE_SIZE_4K-1))
40121
40122 /* Returns bit offset within a DWORD of a bitfield */
40123 -#define AMAP_BIT_OFFSET(_struct, field) \
40124 +#define AMAP_BIT_OFFSET(_struct, field) \
40125 (((size_t)&(((_struct *)0)->field))%32)
40126
40127 /* Returns the bit mask of the field that is NOT shifted into location. */
40128 @@ -356,6 +525,11 @@ static inline void swap_dws(void *wrb, int len)
40129 #endif /* __BIG_ENDIAN */
40130 }
40131
40132 +static inline bool vlan_configured(struct be_adapter *adapter)
40133 +{
40134 + return adapter->vlan_grp && adapter->vlans_added;
40135 +}
40136 +
40137 static inline u8 is_tcp_pkt(struct sk_buff *skb)
40138 {
40139 u8 val = 0;
40140 @@ -380,9 +554,65 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
40141 return val;
40142 }
40143
40144 +static inline u8 is_ipv6_ext_hdr(struct sk_buff *skb)
40145 +{
40146 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
40147 + if (ip_hdr(skb)->version == 6)
40148 + return ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr);
40149 + else
40150 +#endif
40151 + return 0;
40152 +}
40153 +
40154 +static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
40155 +{
40156 + u32 sli_intf;
40157 +
40158 + pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
40159 + adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
40160 +}
40161 +
40162 +static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
40163 +{
40164 + u32 addr;
40165 +
40166 + addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
40167 +
40168 + mac[5] = (u8)(addr & 0xFF);
40169 + mac[4] = (u8)((addr >> 8) & 0xFF);
40170 + mac[3] = (u8)((addr >> 16) & 0xFF);
40171 + /* Use the OUI programmed in hardware */
40172 + memcpy(mac, adapter->netdev->dev_addr, 3);
40173 +}
40174 +
40175 +static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
40176 + struct sk_buff *skb)
40177 +{
40178 + u8 vlan_prio = 0;
40179 + u16 vlan_tag = 0;
40180 +
40181 + vlan_tag = vlan_tx_tag_get(skb);
40182 + vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
40183 + /* If vlan priority provided by OS is NOT in available bmap */
40184 + if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
40185 + vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
40186 + adapter->recommended_prio;
40187 +
40188 + return vlan_tag;
40189 +}
40190 +
40191 +#define be_physfn(adapter) (!adapter->is_virtfn)
40192 +
40193 extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
40194 u16 num_popped);
40195 -extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
40196 +extern void be_link_status_update(struct be_adapter *adapter, int link_status);
40197 extern void netdev_stats_update(struct be_adapter *adapter);
40198 +extern void be_parse_stats(struct be_adapter *adapter);
40199 extern int be_load_fw(struct be_adapter *adapter, u8 *func);
40200 +
40201 +#ifdef CONFIG_PALAU
40202 +extern void be_sysfs_create_group(struct be_adapter *adapter);
40203 +extern void be_sysfs_remove_group(struct be_adapter *adapter);
40204 +#endif
40205 +
40206 #endif /* BE_H */
40207 diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
40208 index 28a0eda..b4ca89c 100644
40209 --- a/drivers/net/benet/be_cmds.c
40210 +++ b/drivers/net/benet/be_cmds.c
40211 @@ -1,30 +1,45 @@
40212 /*
40213 - * Copyright (C) 2005 - 2009 ServerEngines
40214 + * Copyright (C) 2005 - 2011 Emulex
40215 * All rights reserved.
40216 *
40217 * This program is free software; you can redistribute it and/or
40218 * modify it under the terms of the GNU General Public License version 2
40219 - * as published by the Free Software Foundation. The full GNU General
40220 + * as published by the Free Software Foundation. The full GNU General
40221 * Public License is included in this distribution in the file called COPYING.
40222 *
40223 * Contact Information:
40224 - * linux-drivers@serverengines.com
40225 + * linux-drivers@emulex.com
40226 *
40227 - * ServerEngines
40228 - * 209 N. Fair Oaks Ave
40229 - * Sunnyvale, CA 94085
40230 + * Emulex
40231 + * 3333 Susan Street
40232 + * Costa Mesa, CA 92626
40233 */
40234
40235 #include "be.h"
40236 #include "be_cmds.h"
40237
40238 +/* Must be a power of 2 or else MODULO will BUG_ON */
40239 +static int be_get_temp_freq = 64;
40240 +
40241 +static inline void *embedded_payload(struct be_mcc_wrb *wrb)
40242 +{
40243 + return wrb->payload.embedded_payload;
40244 +}
40245 +
40246 static void be_mcc_notify(struct be_adapter *adapter)
40247 {
40248 struct be_queue_info *mccq = &adapter->mcc_obj.q;
40249 u32 val = 0;
40250
40251 + if (adapter->eeh_err) {
40252 + dev_info(&adapter->pdev->dev, "Error in Card Detected! Cannot issue commands\n");
40253 + return;
40254 + }
40255 +
40256 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
40257 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
40258 +
40259 + wmb();
40260 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
40261 }
40262
40263 @@ -59,21 +74,67 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
40264
40265 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
40266 CQE_STATUS_COMPL_MASK;
40267 +
40268 + if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
40269 + (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
40270 + adapter->flash_status = compl_status;
40271 + complete(&adapter->flash_compl);
40272 + }
40273 +
40274 if (compl_status == MCC_STATUS_SUCCESS) {
40275 - if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
40276 - struct be_cmd_resp_get_stats *resp =
40277 - adapter->stats.cmd.va;
40278 - be_dws_le_to_cpu(&resp->hw_stats,
40279 - sizeof(resp->hw_stats));
40280 + if ((compl->tag0 == OPCODE_ETH_GET_STATISTICS) &&
40281 + (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
40282 + if (adapter->generation == BE_GEN3) {
40283 + struct be_cmd_resp_get_stats_v1 *resp =
40284 + adapter->stats_cmd.va;
40285 +
40286 + be_dws_le_to_cpu(&resp->hw_stats,
40287 + sizeof(resp->hw_stats));
40288 + } else {
40289 + struct be_cmd_resp_get_stats_v0 *resp =
40290 + adapter->stats_cmd.va;
40291 +
40292 + be_dws_le_to_cpu(&resp->hw_stats,
40293 + sizeof(resp->hw_stats));
40294 + }
40295 + be_parse_stats(adapter);
40296 netdev_stats_update(adapter);
40297 + adapter->stats_cmd_sent = false;
40298 + }
40299 + if (compl->tag0 ==
40300 + OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) {
40301 + struct be_mcc_wrb *mcc_wrb =
40302 + queue_index_node(&adapter->mcc_obj.q,
40303 + compl->tag1);
40304 + struct be_cmd_resp_get_cntl_addnl_attribs *resp =
40305 + embedded_payload(mcc_wrb);
40306 + adapter->drv_stats.be_on_die_temperature =
40307 + resp->on_die_temperature;
40308 + }
40309 + } else {
40310 + if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
40311 + be_get_temp_freq = 0;
40312 +
40313 + if (compl->tag1 == MCC_WRB_PASS_THRU)
40314 + goto done;
40315 +
40316 + if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
40317 + compl_status == MCC_STATUS_ILLEGAL_REQUEST)
40318 + goto done;
40319 +
40320 + if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
40321 + dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
40322 + "permitted to execute this cmd (opcode %d)\n",
40323 + compl->tag0);
40324 + } else {
40325 + extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
40326 + CQE_STATUS_EXTD_MASK;
40327 + dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
40328 + "status %d, extd-status %d\n",
40329 + compl->tag0, compl_status, extd_status);
40330 }
40331 - } else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
40332 - extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
40333 - CQE_STATUS_EXTD_MASK;
40334 - dev_warn(&adapter->pdev->dev,
40335 - "Error in cmd completion: status(compl/extd)=%d/%d\n",
40336 - compl_status, extd_status);
40337 }
40338 +done:
40339 return compl_status;
40340 }
40341
40342 @@ -82,7 +143,70 @@ static void be_async_link_state_process(struct be_adapter *adapter,
40343 struct be_async_event_link_state *evt)
40344 {
40345 be_link_status_update(adapter,
40346 - evt->port_link_status == ASYNC_EVENT_LINK_UP);
40347 + ((evt->port_link_status & ~ASYNC_EVENT_LOGICAL) ==
40348 + ASYNC_EVENT_LINK_UP ? LINK_UP : LINK_DOWN));
40349 +}
40350 +
40351 +/* Grp5 CoS Priority evt */
40352 +static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
40353 + struct be_async_event_grp5_cos_priority *evt)
40354 +{
40355 + if (evt->valid) {
40356 + adapter->vlan_prio_bmap = evt->available_priority_bmap;
40357 + adapter->recommended_prio &= ~VLAN_PRIO_MASK;
40358 + adapter->recommended_prio =
40359 + evt->reco_default_priority << VLAN_PRIO_SHIFT;
40360 + }
40361 +}
40362 +
40363 +/* Grp5 QOS Speed evt */
40364 +static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
40365 + struct be_async_event_grp5_qos_link_speed *evt)
40366 +{
40367 + if (evt->physical_port == adapter->hba_port_num) {
40368 + /* qos_link_speed is in units of 10 Mbps */
40369 + adapter->link_speed = evt->qos_link_speed * 10;
40370 + }
40371 +}
40372 +
40373 +/*Grp5 PVID evt*/
40374 +static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
40375 + struct be_async_event_grp5_pvid_state *evt)
40376 +{
40377 + if (evt->enabled)
40378 + adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK ;
40379 + else
40380 + adapter->pvid = 0;
40381 +}
40382 +
40383 +static void be_async_grp5_evt_process(struct be_adapter *adapter,
40384 + u32 trailer, struct be_mcc_compl *evt)
40385 +{
40386 + u8 event_type = 0;
40387 +
40388 + event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
40389 + ASYNC_TRAILER_EVENT_TYPE_MASK;
40390 +
40391 + switch (event_type) {
40392 + case ASYNC_EVENT_COS_PRIORITY:
40393 + be_async_grp5_cos_priority_process(adapter,
40394 + (struct be_async_event_grp5_cos_priority *)evt);
40395 + break;
40396 + case ASYNC_EVENT_QOS_SPEED:
40397 + be_async_grp5_qos_speed_process(adapter,
40398 + (struct be_async_event_grp5_qos_link_speed *)evt);
40399 + break;
40400 + case ASYNC_EVENT_PVID_STATE:
40401 + be_async_grp5_pvid_state_process(adapter,
40402 + (struct be_async_event_grp5_pvid_state *)evt);
40403 + break;
40404 + case GRP5_TYPE_PRIO_TC_MAP:
40405 + memcpy(adapter->prio_tc_map, evt, MAX_TX_QS);
40406 + break;
40407 + default:
40408 + printk(KERN_WARNING "Unknown grp5 event!\n");
40409 + break;
40410 + }
40411 }
40412
40413 static inline bool is_link_state_evt(u32 trailer)
40414 @@ -92,6 +216,13 @@ static inline bool is_link_state_evt(u32 trailer)
40415 ASYNC_EVENT_CODE_LINK_STATE);
40416 }
40417
40418 +static inline bool is_grp5_evt(u32 trailer)
40419 +{
40420 + return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
40421 + ASYNC_TRAILER_EVENT_CODE_MASK) ==
40422 + ASYNC_EVENT_CODE_GRP_5);
40423 +}
40424 +
40425 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
40426 {
40427 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
40428 @@ -104,46 +235,67 @@ static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
40429 return NULL;
40430 }
40431
40432 -int be_process_mcc(struct be_adapter *adapter)
40433 +void be_async_mcc_enable(struct be_adapter *adapter)
40434 +{
40435 + spin_lock_bh(&adapter->mcc_cq_lock);
40436 +
40437 + be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
40438 + adapter->mcc_obj.rearm_cq = true;
40439 +
40440 + spin_unlock_bh(&adapter->mcc_cq_lock);
40441 +}
40442 +
40443 +void be_async_mcc_disable(struct be_adapter *adapter)
40444 +{
40445 + adapter->mcc_obj.rearm_cq = false;
40446 +}
40447 +
40448 +int be_process_mcc(struct be_adapter *adapter, int *status)
40449 {
40450 struct be_mcc_compl *compl;
40451 - int num = 0, status = 0;
40452 + int num = 0;
40453 + struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
40454
40455 spin_lock_bh(&adapter->mcc_cq_lock);
40456 while ((compl = be_mcc_compl_get(adapter))) {
40457 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
40458 /* Interpret flags as an async trailer */
40459 - BUG_ON(!is_link_state_evt(compl->flags));
40460 -
40461 - /* Interpret compl as a async link evt */
40462 - be_async_link_state_process(adapter,
40463 + if (is_link_state_evt(compl->flags))
40464 + be_async_link_state_process(adapter,
40465 (struct be_async_event_link_state *) compl);
40466 + else if (is_grp5_evt(compl->flags))
40467 + be_async_grp5_evt_process(adapter,
40468 + compl->flags, compl);
40469 +
40470 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
40471 - status = be_mcc_compl_process(adapter, compl);
40472 - atomic_dec(&adapter->mcc_obj.q.used);
40473 + *status = be_mcc_compl_process(adapter, compl);
40474 + atomic_dec(&mcc_obj->q.used);
40475 }
40476 be_mcc_compl_use(compl);
40477 num++;
40478 }
40479
40480 - if (num)
40481 - be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);
40482 -
40483 spin_unlock_bh(&adapter->mcc_cq_lock);
40484 - return status;
40485 + return num;
40486 }
40487
40488 /* Wait till no more pending mcc requests are present */
40489 static int be_mcc_wait_compl(struct be_adapter *adapter)
40490 {
40491 #define mcc_timeout 120000 /* 12s timeout */
40492 - int i, status;
40493 + int i, num, status = 0;
40494 + struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
40495 +
40496 + if (adapter->eeh_err)
40497 + return -EIO;
40498 +
40499 for (i = 0; i < mcc_timeout; i++) {
40500 - status = be_process_mcc(adapter);
40501 - if (status)
40502 - return status;
40503 + num = be_process_mcc(adapter, &status);
40504 + if (num)
40505 + be_cq_notify(adapter, mcc_obj->cq.id,
40506 + mcc_obj->rearm_cq, num);
40507
40508 - if (atomic_read(&adapter->mcc_obj.q.used) == 0)
40509 + if (atomic_read(&mcc_obj->q.used) == 0)
40510 break;
40511 udelay(100);
40512 }
40513 @@ -151,7 +303,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
40514 dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
40515 return -1;
40516 }
40517 - return 0;
40518 + return status;
40519 }
40520
40521 /* Notify MCC requests and wait for completion */
40522 @@ -163,23 +315,34 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
40523
40524 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
40525 {
40526 - int cnt = 0, wait = 5;
40527 + int msecs = 0;
40528 u32 ready;
40529
40530 + if (adapter->eeh_err) {
40531 + dev_err(&adapter->pdev->dev, "Error detected in card.Cannot issue commands\n");
40532 + return -EIO;
40533 + }
40534 do {
40535 - ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
40536 + ready = ioread32(db);
40537 + if (ready == 0xffffffff) {
40538 + dev_err(&adapter->pdev->dev,
40539 + "pci slot disconnected\n");
40540 + return -1;
40541 + }
40542 +
40543 + ready &= MPU_MAILBOX_DB_RDY_MASK;
40544 if (ready)
40545 break;
40546
40547 - if (cnt > 4000000) {
40548 + if (msecs > 4000) {
40549 dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
40550 + be_detect_dump_ue(adapter);
40551 return -1;
40552 }
40553
40554 - if (cnt > 50)
40555 - wait = 200;
40556 - cnt += wait;
40557 - udelay(wait);
40558 + set_current_state(TASK_UNINTERRUPTIBLE);
40559 + schedule_timeout(msecs_to_jiffies(1));
40560 + msecs++;
40561 } while (true);
40562
40563 return 0;
40564 @@ -198,6 +361,11 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
40565 struct be_mcc_mailbox *mbox = mbox_mem->va;
40566 struct be_mcc_compl *compl = &mbox->compl;
40567
40568 + /* wait for ready to be set */
40569 + status = be_mbox_db_ready_wait(adapter, db);
40570 + if (status != 0)
40571 + return status;
40572 +
40573 val |= MPU_MAILBOX_DB_HI_MASK;
40574 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
40575 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
40576 @@ -232,7 +400,12 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
40577
40578 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
40579 {
40580 - u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
40581 + u32 sem;
40582 +
40583 + if (lancer_chip(adapter))
40584 + sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
40585 + else
40586 + sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
40587
40588 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
40589 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
40590 @@ -245,30 +418,29 @@ int be_cmd_POST(struct be_adapter *adapter)
40591 {
40592 u16 stage;
40593 int status, timeout = 0;
40594 + struct device *dev = &adapter->pdev->dev;
40595
40596 do {
40597 status = be_POST_stage_get(adapter, &stage);
40598 if (status) {
40599 - dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
40600 - stage);
40601 + dev_err(dev, "POST error; stage=0x%x\n", stage);
40602 return -1;
40603 } else if (stage != POST_STAGE_ARMFW_RDY) {
40604 set_current_state(TASK_INTERRUPTIBLE);
40605 - schedule_timeout(2 * HZ);
40606 + if (schedule_timeout(2 * HZ)) {
40607 + dev_err(dev, "POST cmd aborted\n");
40608 + return -EINTR;
40609 + }
40610 timeout += 2;
40611 } else {
40612 return 0;
40613 }
40614 - } while (timeout < 20);
40615 + } while (timeout < 40);
40616
40617 - dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
40618 + dev_err(dev, "POST timeout; stage=0x%x\n", stage);
40619 return -1;
40620 }
40621
40622 -static inline void *embedded_payload(struct be_mcc_wrb *wrb)
40623 -{
40624 - return wrb->payload.embedded_payload;
40625 -}
40626
40627 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
40628 {
40629 @@ -277,7 +449,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
40630
40631 /* Don't touch the hdr after it's prepared */
40632 static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
40633 - bool embedded, u8 sge_cnt)
40634 + bool embedded, u8 sge_cnt, u32 opcode)
40635 {
40636 if (embedded)
40637 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
40638 @@ -285,7 +457,8 @@ static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
40639 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
40640 MCC_WRB_SGE_CNT_SHIFT;
40641 wrb->payload_length = payload_len;
40642 - be_dws_cpu_to_le(wrb, 20);
40643 + wrb->tag0 = opcode;
40644 + be_dws_cpu_to_le(wrb, 8);
40645 }
40646
40647 /* Don't touch the hdr after it's prepared */
40648 @@ -295,6 +468,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
40649 req_hdr->opcode = opcode;
40650 req_hdr->subsystem = subsystem;
40651 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
40652 + req_hdr->version = 0;
40653 }
40654
40655 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
40656 @@ -349,7 +523,11 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
40657 struct be_queue_info *mccq = &adapter->mcc_obj.q;
40658 struct be_mcc_wrb *wrb;
40659
40660 - BUG_ON(atomic_read(&mccq->used) >= mccq->len);
40661 + if (atomic_read(&mccq->used) >= mccq->len) {
40662 + dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
40663 + return NULL;
40664 + }
40665 +
40666 wrb = queue_head_node(mccq);
40667 queue_head_inc(mccq);
40668 atomic_inc(&mccq->used);
40669 @@ -357,6 +535,59 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
40670 return wrb;
40671 }
40672
40673 +/* Tell fw we're about to start firing cmds by writing a
40674 + * special pattern across the wrb hdr; uses mbox
40675 + */
40676 +int be_cmd_fw_init(struct be_adapter *adapter)
40677 +{
40678 + u8 *wrb;
40679 + int status;
40680 +
40681 + if (mutex_lock_interruptible(&adapter->mbox_lock))
40682 + return -1;
40683 +
40684 + wrb = (u8 *)wrb_from_mbox(adapter);
40685 + *wrb++ = 0xFF;
40686 + *wrb++ = 0x12;
40687 + *wrb++ = 0x34;
40688 + *wrb++ = 0xFF;
40689 + *wrb++ = 0xFF;
40690 + *wrb++ = 0x56;
40691 + *wrb++ = 0x78;
40692 + *wrb = 0xFF;
40693 +
40694 + status = be_mbox_notify_wait(adapter);
40695 +
40696 + mutex_unlock(&adapter->mbox_lock);
40697 + return status;
40698 +}
40699 +
40700 +/* Tell fw we're done with firing cmds by writing a
40701 + * special pattern across the wrb hdr; uses mbox
40702 + */
40703 +int be_cmd_fw_clean(struct be_adapter *adapter)
40704 +{
40705 + u8 *wrb;
40706 + int status;
40707 +
40708 + if (mutex_lock_interruptible(&adapter->mbox_lock))
40709 + return -1;
40710 +
40711 + wrb = (u8 *)wrb_from_mbox(adapter);
40712 + *wrb++ = 0xFF;
40713 + *wrb++ = 0xAA;
40714 + *wrb++ = 0xBB;
40715 + *wrb++ = 0xFF;
40716 + *wrb++ = 0xFF;
40717 + *wrb++ = 0xCC;
40718 + *wrb++ = 0xDD;
40719 + *wrb = 0xFF;
40720 +
40721 + status = be_mbox_notify_wait(adapter);
40722 +
40723 + mutex_unlock(&adapter->mbox_lock);
40724 + return status;
40725 +}
40726 int be_cmd_eq_create(struct be_adapter *adapter,
40727 struct be_queue_info *eq, int eq_delay)
40728 {
40729 @@ -365,20 +596,19 @@ int be_cmd_eq_create(struct be_adapter *adapter,
40730 struct be_dma_mem *q_mem = &eq->dma_mem;
40731 int status;
40732
40733 - spin_lock(&adapter->mbox_lock);
40734 + if (mutex_lock_interruptible(&adapter->mbox_lock))
40735 + return -1;
40736
40737 wrb = wrb_from_mbox(adapter);
40738 req = embedded_payload(wrb);
40739
40740 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40741 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);
40742
40743 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40744 OPCODE_COMMON_EQ_CREATE, sizeof(*req));
40745
40746 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40747
40748 - AMAP_SET_BITS(struct amap_eq_context, func, req->context,
40749 - be_pci_func(adapter));
40750 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
40751 /* 4byte eqe*/
40752 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
40753 @@ -397,7 +627,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
40754 eq->created = true;
40755 }
40756
40757 - spin_unlock(&adapter->mbox_lock);
40758 + mutex_unlock(&adapter->mbox_lock);
40759 return status;
40760 }
40761
40762 @@ -409,12 +639,14 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
40763 struct be_cmd_req_mac_query *req;
40764 int status;
40765
40766 - spin_lock(&adapter->mbox_lock);
40767 + if (mutex_lock_interruptible(&adapter->mbox_lock))
40768 + return -1;
40769
40770 wrb = wrb_from_mbox(adapter);
40771 req = embedded_payload(wrb);
40772
40773 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40774 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40775 + OPCODE_COMMON_NTWK_MAC_QUERY);
40776
40777 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40778 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
40779 @@ -433,13 +665,13 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
40780 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
40781 }
40782
40783 - spin_unlock(&adapter->mbox_lock);
40784 + mutex_unlock(&adapter->mbox_lock);
40785 return status;
40786 }
40787
40788 /* Uses synchronous MCCQ */
40789 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
40790 - u32 if_id, u32 *pmac_id)
40791 + u32 if_id, u32 *pmac_id, u32 domain)
40792 {
40793 struct be_mcc_wrb *wrb;
40794 struct be_cmd_req_pmac_add *req;
40795 @@ -448,13 +680,19 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
40796 spin_lock_bh(&adapter->mcc_lock);
40797
40798 wrb = wrb_from_mccq(adapter);
40799 + if (!wrb) {
40800 + status = -EBUSY;
40801 + goto err;
40802 + }
40803 req = embedded_payload(wrb);
40804
40805 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40806 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40807 + OPCODE_COMMON_NTWK_PMAC_ADD);
40808
40809 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40810 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
40811
40812 + req->hdr.domain = domain;
40813 req->if_id = cpu_to_le32(if_id);
40814 memcpy(req->mac_address, mac_addr, ETH_ALEN);
40815
40816 @@ -464,12 +702,13 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
40817 *pmac_id = le32_to_cpu(resp->pmac_id);
40818 }
40819
40820 +err:
40821 spin_unlock_bh(&adapter->mcc_lock);
40822 return status;
40823 }
40824
40825 /* Uses synchronous MCCQ */
40826 -int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
40827 +int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
40828 {
40829 struct be_mcc_wrb *wrb;
40830 struct be_cmd_req_pmac_del *req;
40831 @@ -478,20 +717,26 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
40832 spin_lock_bh(&adapter->mcc_lock);
40833
40834 wrb = wrb_from_mccq(adapter);
40835 + if (!wrb) {
40836 + status = -EBUSY;
40837 + goto err;
40838 + }
40839 req = embedded_payload(wrb);
40840
40841 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40842 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40843 + OPCODE_COMMON_NTWK_PMAC_DEL);
40844
40845 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40846 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
40847
40848 + req->hdr.domain = dom;
40849 req->if_id = cpu_to_le32(if_id);
40850 req->pmac_id = cpu_to_le32(pmac_id);
40851
40852 status = be_mcc_notify_wait(adapter);
40853
40854 +err:
40855 spin_unlock_bh(&adapter->mcc_lock);
40856 -
40857 return status;
40858 }
40859
40860 @@ -506,29 +751,51 @@ int be_cmd_cq_create(struct be_adapter *adapter,
40861 void *ctxt;
40862 int status;
40863
40864 - spin_lock(&adapter->mbox_lock);
40865 + if (mutex_lock_interruptible(&adapter->mbox_lock))
40866 + return -1;
40867
40868 wrb = wrb_from_mbox(adapter);
40869 req = embedded_payload(wrb);
40870 ctxt = &req->context;
40871
40872 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40873 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40874 + OPCODE_COMMON_CQ_CREATE);
40875
40876 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40877 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
40878
40879 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40880
40881 - AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
40882 - AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
40883 - AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
40884 - __ilog2_u32(cq->len/256));
40885 - AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
40886 - AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
40887 - AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
40888 - AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
40889 - AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
40890 - AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
40891 + if (lancer_chip(adapter)) {
40892 + req->hdr.version = 2;
40893 + req->page_size = 1; /* 1 for 4K */
40894 + AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
40895 + coalesce_wm);
40896 + AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
40897 + no_delay);
40898 + AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
40899 + __ilog2_u32(cq->len/256));
40900 + AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
40901 + AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
40902 + ctxt, 1);
40903 + AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
40904 + ctxt, eq->id);
40905 + AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
40906 + } else {
40907 + AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
40908 + coalesce_wm);
40909 + AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
40910 + ctxt, no_delay);
40911 + AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
40912 + __ilog2_u32(cq->len/256));
40913 + AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
40914 + AMAP_SET_BITS(struct amap_cq_context_be, solevent,
40915 + ctxt, sol_evts);
40916 + AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
40917 + AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
40918 + AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
40919 + }
40920 +
40921 be_dws_cpu_to_le(ctxt, sizeof(req->context));
40922
40923 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
40924 @@ -540,8 +807,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
40925 cq->created = true;
40926 }
40927
40928 - spin_unlock(&adapter->mbox_lock);
40929 -
40930 + mutex_unlock(&adapter->mbox_lock);
40931 return status;
40932 }
40933
40934 @@ -553,7 +819,68 @@ static u32 be_encoded_q_len(int q_len)
40935 return len_encoded;
40936 }
40937
40938 -int be_cmd_mccq_create(struct be_adapter *adapter,
40939 +int be_cmd_mccq_ext_create(struct be_adapter *adapter,
40940 + struct be_queue_info *mccq,
40941 + struct be_queue_info *cq)
40942 +{
40943 + struct be_mcc_wrb *wrb;
40944 + struct be_cmd_req_mcc_ext_create *req;
40945 + struct be_dma_mem *q_mem = &mccq->dma_mem;
40946 + void *ctxt;
40947 + int status;
40948 +
40949 + if (mutex_lock_interruptible(&adapter->mbox_lock))
40950 + return -1;
40951 +
40952 + wrb = wrb_from_mbox(adapter);
40953 + req = embedded_payload(wrb);
40954 + ctxt = &req->context;
40955 +
40956 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40957 + OPCODE_COMMON_MCC_CREATE_EXT);
40958 +
40959 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40960 + OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
40961 +
40962 + req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40963 + if (lancer_chip(adapter)) {
40964 + req->hdr.version = 1;
40965 + req->cq_id = cpu_to_le16(cq->id);
40966 +
40967 + AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
40968 + be_encoded_q_len(mccq->len));
40969 + AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
40970 + AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
40971 + ctxt, cq->id);
40972 + AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
40973 + ctxt, 1);
40974 +
40975 + } else {
40976 + AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
40977 + AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
40978 + be_encoded_q_len(mccq->len));
40979 + AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
40980 + }
40981 +
40982 + /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
40983 + req->async_event_bitmap[0] |= cpu_to_le32(0x00000022);
40984 +
40985 + be_dws_cpu_to_le(ctxt, sizeof(req->context));
40986 +
40987 + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
40988 +
40989 + status = be_mbox_notify_wait(adapter);
40990 + if (!status) {
40991 + struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
40992 + mccq->id = le16_to_cpu(resp->id);
40993 + mccq->created = true;
40994 + }
40995 +
40996 + mutex_unlock(&adapter->mbox_lock);
40997 + return status;
40998 +}
40999 +
41000 +int be_cmd_mccq_org_create(struct be_adapter *adapter,
41001 struct be_queue_info *mccq,
41002 struct be_queue_info *cq)
41003 {
41004 @@ -563,24 +890,25 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
41005 void *ctxt;
41006 int status;
41007
41008 - spin_lock(&adapter->mbox_lock);
41009 + if (mutex_lock_interruptible(&adapter->mbox_lock))
41010 + return -1;
41011
41012 wrb = wrb_from_mbox(adapter);
41013 req = embedded_payload(wrb);
41014 ctxt = &req->context;
41015
41016 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41017 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41018 + OPCODE_COMMON_MCC_CREATE);
41019
41020 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41021 OPCODE_COMMON_MCC_CREATE, sizeof(*req));
41022
41023 - req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
41024 + req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
41025
41026 - AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
41027 - AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
41028 - AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
41029 - be_encoded_q_len(mccq->len));
41030 - AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
41031 + AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
41032 + AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
41033 + be_encoded_q_len(mccq->len));
41034 + AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
41035
41036 be_dws_cpu_to_le(ctxt, sizeof(req->context));
41037
41038 @@ -592,75 +920,93 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
41039 mccq->id = le16_to_cpu(resp->id);
41040 mccq->created = true;
41041 }
41042 - spin_unlock(&adapter->mbox_lock);
41043
41044 + mutex_unlock(&adapter->mbox_lock);
41045 return status;
41046 }
41047
41048 -int be_cmd_txq_create(struct be_adapter *adapter,
41049 - struct be_queue_info *txq,
41050 +int be_cmd_mccq_create(struct be_adapter *adapter,
41051 + struct be_queue_info *mccq,
41052 struct be_queue_info *cq)
41053 {
41054 + int status;
41055 +
41056 + status = be_cmd_mccq_ext_create(adapter, mccq, cq);
41057 + if (status && !lancer_chip(adapter)) {
41058 + dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
41059 + "or newer to avoid conflicting priorities between NIC "
41060 + "and FCoE traffic");
41061 + status = be_cmd_mccq_org_create(adapter, mccq, cq);
41062 + }
41063 + return status;
41064 +}
41065 +
41066 +int be_cmd_txq_create(struct be_adapter *adapter, struct be_queue_info *txq,
41067 + struct be_queue_info *cq, u8 *tc_id)
41068 +{
41069 struct be_mcc_wrb *wrb;
41070 struct be_cmd_req_eth_tx_create *req;
41071 struct be_dma_mem *q_mem = &txq->dma_mem;
41072 - void *ctxt;
41073 int status;
41074
41075 - spin_lock(&adapter->mbox_lock);
41076 + if (mutex_lock_interruptible(&adapter->mbox_lock))
41077 + return -1;
41078
41079 wrb = wrb_from_mbox(adapter);
41080 req = embedded_payload(wrb);
41081 - ctxt = &req->context;
41082 -
41083 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41084
41085 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_TX_CREATE);
41086 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
41087 sizeof(*req));
41088
41089 - req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
41090 + if (adapter->flags & BE_FLAGS_DCBX || lancer_chip(adapter)) {
41091 + req->hdr.version = 1;
41092 + req->if_id = cpu_to_le16(adapter->if_handle);
41093 + }
41094 + if (adapter->flags & BE_FLAGS_DCBX)
41095 + req->type = cpu_to_le16(ETX_QUEUE_TYPE_PRIORITY);
41096 + else
41097 + req->type = cpu_to_le16(ETX_QUEUE_TYPE_STANDARD);
41098 req->ulp_num = BE_ULP1_NUM;
41099 - req->type = BE_ETH_TX_RING_TYPE_STANDARD;
41100 -
41101 - AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
41102 - be_encoded_q_len(txq->len));
41103 - AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
41104 - be_pci_func(adapter));
41105 - AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
41106 - AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
41107 -
41108 - be_dws_cpu_to_le(ctxt, sizeof(req->context));
41109 -
41110 + req->cq_id = cpu_to_le16(cq->id);
41111 + req->queue_size = be_encoded_q_len(txq->len);
41112 + req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
41113 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
41114
41115 status = be_mbox_notify_wait(adapter);
41116 if (!status) {
41117 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
41118 txq->id = le16_to_cpu(resp->cid);
41119 + if (adapter->flags & BE_FLAGS_DCBX)
41120 + *tc_id = resp->tc_id;
41121 txq->created = true;
41122 }
41123
41124 - spin_unlock(&adapter->mbox_lock);
41125 -
41126 + mutex_unlock(&adapter->mbox_lock);
41127 return status;
41128 }
41129
41130 -/* Uses mbox */
41131 +/* Uses MCC */
41132 int be_cmd_rxq_create(struct be_adapter *adapter,
41133 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
41134 - u16 max_frame_size, u32 if_id, u32 rss)
41135 + u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
41136 {
41137 struct be_mcc_wrb *wrb;
41138 struct be_cmd_req_eth_rx_create *req;
41139 struct be_dma_mem *q_mem = &rxq->dma_mem;
41140 int status;
41141
41142 - spin_lock(&adapter->mbox_lock);
41143 + spin_lock_bh(&adapter->mcc_lock);
41144
41145 - wrb = wrb_from_mbox(adapter);
41146 + wrb = wrb_from_mccq(adapter);
41147 + if (!wrb) {
41148 + status = -EBUSY;
41149 + goto err;
41150 + }
41151 req = embedded_payload(wrb);
41152
41153 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41154 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41155 + OPCODE_ETH_RX_CREATE);
41156
41157 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
41158 sizeof(*req));
41159 @@ -673,15 +1019,16 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
41160 req->max_frame_size = cpu_to_le16(max_frame_size);
41161 req->rss_queue = cpu_to_le32(rss);
41162
41163 - status = be_mbox_notify_wait(adapter);
41164 + status = be_mcc_notify_wait(adapter);
41165 if (!status) {
41166 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
41167 rxq->id = le16_to_cpu(resp->id);
41168 rxq->created = true;
41169 + *rss_id = resp->rss_id;
41170 }
41171
41172 - spin_unlock(&adapter->mbox_lock);
41173 -
41174 +err:
41175 + spin_unlock_bh(&adapter->mcc_lock);
41176 return status;
41177 }
41178
41179 @@ -696,13 +1043,12 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
41180 u8 subsys = 0, opcode = 0;
41181 int status;
41182
41183 - spin_lock(&adapter->mbox_lock);
41184 + if (mutex_lock_interruptible(&adapter->mbox_lock))
41185 + return -1;
41186
41187 wrb = wrb_from_mbox(adapter);
41188 req = embedded_payload(wrb);
41189
41190 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41191 -
41192 switch (queue_type) {
41193 case QTYPE_EQ:
41194 subsys = CMD_SUBSYSTEM_COMMON;
41195 @@ -727,13 +1073,47 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
41196 default:
41197 BUG();
41198 }
41199 +
41200 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
41201 +
41202 be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
41203 req->id = cpu_to_le16(q->id);
41204
41205 status = be_mbox_notify_wait(adapter);
41206 + if (!status)
41207 + q->created = false;
41208
41209 - spin_unlock(&adapter->mbox_lock);
41210 + mutex_unlock(&adapter->mbox_lock);
41211 + return status;
41212 +}
41213
41214 +/* Uses MCC */
41215 +int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
41216 +{
41217 + struct be_mcc_wrb *wrb;
41218 + struct be_cmd_req_q_destroy *req;
41219 + int status;
41220 +
41221 + spin_lock_bh(&adapter->mcc_lock);
41222 +
41223 + wrb = wrb_from_mccq(adapter);
41224 + if (!wrb) {
41225 + status = -EBUSY;
41226 + goto err;
41227 + }
41228 + req = embedded_payload(wrb);
41229 +
41230 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_RX_DESTROY);
41231 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY,
41232 + sizeof(*req));
41233 + req->id = cpu_to_le16(q->id);
41234 +
41235 + status = be_mcc_notify_wait(adapter);
41236 + if (!status)
41237 + q->created = false;
41238 +
41239 +err:
41240 + spin_unlock_bh(&adapter->mcc_lock);
41241 return status;
41242 }
41243
41244 @@ -741,22 +1121,26 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
41245 * Uses mbox
41246 */
41247 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
41248 - u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
41249 + u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
41250 + u32 domain)
41251 {
41252 struct be_mcc_wrb *wrb;
41253 struct be_cmd_req_if_create *req;
41254 int status;
41255
41256 - spin_lock(&adapter->mbox_lock);
41257 + if (mutex_lock_interruptible(&adapter->mbox_lock))
41258 + return -1;
41259
41260 wrb = wrb_from_mbox(adapter);
41261 req = embedded_payload(wrb);
41262
41263 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41264 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41265 + OPCODE_COMMON_NTWK_INTERFACE_CREATE);
41266
41267 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41268 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
41269
41270 + req->hdr.domain = domain;
41271 req->capability_flags = cpu_to_le32(cap_flags);
41272 req->enable_flags = cpu_to_le32(en_flags);
41273 req->pmac_invalid = pmac_invalid;
41274 @@ -771,33 +1155,35 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
41275 *pmac_id = le32_to_cpu(resp->pmac_id);
41276 }
41277
41278 - spin_unlock(&adapter->mbox_lock);
41279 + mutex_unlock(&adapter->mbox_lock);
41280 return status;
41281 }
41282
41283 /* Uses mbox */
41284 -int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
41285 +int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
41286 {
41287 struct be_mcc_wrb *wrb;
41288 struct be_cmd_req_if_destroy *req;
41289 int status;
41290
41291 - spin_lock(&adapter->mbox_lock);
41292 + if (mutex_lock_interruptible(&adapter->mbox_lock))
41293 + return -1;
41294
41295 wrb = wrb_from_mbox(adapter);
41296 req = embedded_payload(wrb);
41297
41298 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41299 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41300 + OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
41301
41302 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41303 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
41304
41305 + req->hdr.domain = domain;
41306 req->interface_id = cpu_to_le32(interface_id);
41307
41308 status = be_mbox_notify_wait(adapter);
41309
41310 - spin_unlock(&adapter->mbox_lock);
41311 -
41312 + mutex_unlock(&adapter->mbox_lock);
41313 return status;
41314 }
41315
41316 @@ -808,33 +1194,48 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
41317 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
41318 {
41319 struct be_mcc_wrb *wrb;
41320 - struct be_cmd_req_get_stats *req;
41321 + struct be_cmd_req_hdr *hdr;
41322 struct be_sge *sge;
41323 + int status = 0;
41324 +
41325 + if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
41326 + be_cmd_get_die_temperature(adapter);
41327
41328 spin_lock_bh(&adapter->mcc_lock);
41329
41330 wrb = wrb_from_mccq(adapter);
41331 - req = nonemb_cmd->va;
41332 + if (!wrb) {
41333 + status = -EBUSY;
41334 + goto err;
41335 + }
41336 + hdr = nonemb_cmd->va;
41337 sge = nonembedded_sgl(wrb);
41338
41339 - be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
41340 - wrb->tag0 = OPCODE_ETH_GET_STATISTICS;
41341 + be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
41342 + OPCODE_ETH_GET_STATISTICS);
41343
41344 - be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
41345 - OPCODE_ETH_GET_STATISTICS, sizeof(*req));
41346 + be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
41347 + OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size);
41348 +
41349 + if (adapter->generation == BE_GEN3)
41350 + hdr->version = 1;
41351 +
41352 + wrb->tag1 = CMD_SUBSYSTEM_ETH;
41353 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
41354 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
41355 sge->len = cpu_to_le32(nonemb_cmd->size);
41356
41357 be_mcc_notify(adapter);
41358 + adapter->stats_cmd_sent = true;
41359
41360 +err:
41361 spin_unlock_bh(&adapter->mcc_lock);
41362 - return 0;
41363 + return status;
41364 }
41365
41366 /* Uses synchronous mcc */
41367 int be_cmd_link_status_query(struct be_adapter *adapter,
41368 - bool *link_up)
41369 + int *link_status, u8 *mac_speed, u16 *link_speed, u32 dom)
41370 {
41371 struct be_mcc_wrb *wrb;
41372 struct be_cmd_req_link_status *req;
41373 @@ -843,50 +1244,216 @@ int be_cmd_link_status_query(struct be_adapter *adapter,
41374 spin_lock_bh(&adapter->mcc_lock);
41375
41376 wrb = wrb_from_mccq(adapter);
41377 + if (!wrb) {
41378 + status = -EBUSY;
41379 + goto err;
41380 + }
41381 req = embedded_payload(wrb);
41382
41383 - *link_up = false;
41384 + *link_status = LINK_DOWN;
41385
41386 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41387 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41388 + OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
41389
41390 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41391 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
41392
41393 + req->hdr.domain = dom;
41394 +
41395 status = be_mcc_notify_wait(adapter);
41396 if (!status) {
41397 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
41398 - if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
41399 - *link_up = true;
41400 + if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
41401 + *link_status = LINK_UP;
41402 + *link_speed = le16_to_cpu(resp->link_speed);
41403 + *mac_speed = resp->mac_speed;
41404 + }
41405 }
41406
41407 +err:
41408 spin_unlock_bh(&adapter->mcc_lock);
41409 return status;
41410 }
41411
41412 -/* Uses Mbox */
41413 -int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
41414 +/* Uses synchronous mcc */
41415 +int be_cmd_get_die_temperature(struct be_adapter *adapter)
41416 +{
41417 + struct be_mcc_wrb *wrb;
41418 + struct be_cmd_req_get_cntl_addnl_attribs *req;
41419 + u16 mccq_index;
41420 + int status;
41421 +
41422 + spin_lock_bh(&adapter->mcc_lock);
41423 +
41424 + mccq_index = adapter->mcc_obj.q.head;
41425 +
41426 + wrb = wrb_from_mccq(adapter);
41427 + if (!wrb) {
41428 + status = -EBUSY;
41429 + goto err;
41430 + }
41431 + req = embedded_payload(wrb);
41432 +
41433 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41434 + OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
41435 +
41436 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41437 + OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
41438 +
41439 + wrb->tag1 = mccq_index;
41440 +
41441 + be_mcc_notify(adapter);
41442 +
41443 +err:
41444 + spin_unlock_bh(&adapter->mcc_lock);
41445 + return status;
41446 +}
41447 +
41448 +
41449 +/* Uses synchronous mcc */
41450 +int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
41451 +{
41452 + struct be_mcc_wrb *wrb;
41453 + struct be_cmd_req_get_fat *req;
41454 + int status;
41455 +
41456 + spin_lock_bh(&adapter->mcc_lock);
41457 +
41458 + wrb = wrb_from_mccq(adapter);
41459 + if (!wrb) {
41460 + status = -EBUSY;
41461 + goto err;
41462 + }
41463 + req = embedded_payload(wrb);
41464 +
41465 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41466 + OPCODE_COMMON_MANAGE_FAT);
41467 +
41468 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41469 + OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
41470 + req->fat_operation = cpu_to_le32(QUERY_FAT);
41471 + status = be_mcc_notify_wait(adapter);
41472 + if (!status) {
41473 + struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
41474 + if (log_size && resp->log_size)
41475 + *log_size = le32_to_cpu(resp->log_size) -
41476 + sizeof(u32);
41477 + }
41478 +err:
41479 + spin_unlock_bh(&adapter->mcc_lock);
41480 + return status;
41481 +}
41482 +
41483 +void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
41484 +{
41485 + struct be_dma_mem get_fat_cmd;
41486 + struct be_mcc_wrb *wrb;
41487 + struct be_cmd_req_get_fat *req;
41488 + struct be_sge *sge;
41489 + u32 offset = 0, total_size, buf_size,
41490 + log_offset = sizeof(u32), payload_len;
41491 + int status;
41492 +
41493 + if (buf_len == 0)
41494 + return;
41495 +
41496 + total_size = buf_len;
41497 +
41498 + get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
41499 + get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
41500 + get_fat_cmd.size,
41501 + &get_fat_cmd.dma);
41502 + if (!get_fat_cmd.va) {
41503 + status = -ENOMEM;
41504 + dev_err(&adapter->pdev->dev,
41505 + "Memory allocation failure while retrieving FAT data\n");
41506 + return;
41507 + }
41508 +
41509 + spin_lock_bh(&adapter->mcc_lock);
41510 +
41511 + while (total_size) {
41512 + buf_size = min(total_size, (u32)60*1024);
41513 + total_size -= buf_size;
41514 +
41515 + wrb = wrb_from_mccq(adapter);
41516 + if (!wrb) {
41517 + status = -EBUSY;
41518 + goto err;
41519 + }
41520 + req = get_fat_cmd.va;
41521 + sge = nonembedded_sgl(wrb);
41522 +
41523 + payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
41524 + be_wrb_hdr_prepare(wrb, payload_len, false, 1,
41525 + OPCODE_COMMON_MANAGE_FAT);
41526 +
41527 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41528 + OPCODE_COMMON_MANAGE_FAT, payload_len);
41529 +
41530 + sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
41531 + sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
41532 + sge->len = cpu_to_le32(get_fat_cmd.size);
41533 +
41534 + req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
41535 + req->read_log_offset = cpu_to_le32(log_offset);
41536 + req->read_log_length = cpu_to_le32(buf_size);
41537 + req->data_buffer_size = cpu_to_le32(buf_size);
41538 +
41539 + status = be_mcc_notify_wait(adapter);
41540 + if (!status) {
41541 + struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
41542 + memcpy(buf + offset,
41543 + resp->data_buffer,
41544 + le32_to_cpu(resp->read_log_length));
41545 + } else {
41546 + dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
41547 + goto err;
41548 + }
41549 + offset += buf_size;
41550 + log_offset += buf_size;
41551 + }
41552 +err:
41553 + pci_free_consistent(adapter->pdev, get_fat_cmd.size,
41554 + get_fat_cmd.va,
41555 + get_fat_cmd.dma);
41556 + spin_unlock_bh(&adapter->mcc_lock);
41557 +}
41558 +
41559 +/* Uses synchronous mcc */
41560 +int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
41561 + char *fw_on_flash)
41562 {
41563 struct be_mcc_wrb *wrb;
41564 struct be_cmd_req_get_fw_version *req;
41565 int status;
41566
41567 - spin_lock(&adapter->mbox_lock);
41568 + spin_lock_bh(&adapter->mcc_lock);
41569 +
41570 + wrb = wrb_from_mccq(adapter);
41571 + if (!wrb) {
41572 + status = -EBUSY;
41573 + goto err;
41574 + }
41575
41576 - wrb = wrb_from_mbox(adapter);
41577 req = embedded_payload(wrb);
41578
41579 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41580 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41581 + OPCODE_COMMON_GET_FW_VERSION);
41582
41583 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41584 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
41585
41586 - status = be_mbox_notify_wait(adapter);
41587 + status = be_mcc_notify_wait(adapter);
41588 if (!status) {
41589 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
41590 - strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
41591 + strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN-1);
41592 + if (fw_on_flash)
41593 + strncpy(fw_on_flash, resp->fw_on_flash_version_string,
41594 + FW_VER_LEN-1);
41595 }
41596 -
41597 - spin_unlock(&adapter->mbox_lock);
41598 +err:
41599 + spin_unlock_bh(&adapter->mcc_lock);
41600 return status;
41601 }
41602
41603 @@ -897,13 +1464,19 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
41604 {
41605 struct be_mcc_wrb *wrb;
41606 struct be_cmd_req_modify_eq_delay *req;
41607 + int status = 0;
41608
41609 spin_lock_bh(&adapter->mcc_lock);
41610
41611 wrb = wrb_from_mccq(adapter);
41612 + if (!wrb) {
41613 + status = -EBUSY;
41614 + goto err;
41615 + }
41616 req = embedded_payload(wrb);
41617
41618 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41619 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41620 + OPCODE_COMMON_MODIFY_EQ_DELAY);
41621
41622 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41623 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
41624 @@ -915,8 +1488,9 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
41625
41626 be_mcc_notify(adapter);
41627
41628 +err:
41629 spin_unlock_bh(&adapter->mcc_lock);
41630 - return 0;
41631 + return status;
41632 }
41633
41634 /* Uses sycnhronous mcc */
41635 @@ -930,9 +1504,14 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
41636 spin_lock_bh(&adapter->mcc_lock);
41637
41638 wrb = wrb_from_mccq(adapter);
41639 + if (!wrb) {
41640 + status = -EBUSY;
41641 + goto err;
41642 + }
41643 req = embedded_payload(wrb);
41644
41645 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41646 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41647 + OPCODE_COMMON_NTWK_VLAN_CONFIG);
41648
41649 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41650 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
41651 @@ -948,79 +1527,63 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
41652
41653 status = be_mcc_notify_wait(adapter);
41654
41655 +err:
41656 spin_unlock_bh(&adapter->mcc_lock);
41657 return status;
41658 }
41659
41660 -/* Uses MCC for this command as it may be called in BH context
41661 - * Uses synchronous mcc
41662 - */
41663 -int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
41664 +int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
41665 {
41666 struct be_mcc_wrb *wrb;
41667 - struct be_cmd_req_promiscuous_config *req;
41668 + struct be_dma_mem *mem = &adapter->rx_filter;
41669 + struct be_cmd_req_rx_filter *req = mem->va;
41670 + struct be_sge *sge;
41671 int status;
41672
41673 spin_lock_bh(&adapter->mcc_lock);
41674
41675 wrb = wrb_from_mccq(adapter);
41676 - req = embedded_payload(wrb);
41677 -
41678 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41679 -
41680 - be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
41681 - OPCODE_ETH_PROMISCUOUS, sizeof(*req));
41682 -
41683 - if (port_num)
41684 - req->port1_promiscuous = en;
41685 - else
41686 - req->port0_promiscuous = en;
41687 -
41688 - status = be_mcc_notify_wait(adapter);
41689 -
41690 - spin_unlock_bh(&adapter->mcc_lock);
41691 - return status;
41692 -}
41693 -
41694 -/*
41695 - * Uses MCC for this command as it may be called in BH context
41696 - * (mc == NULL) => multicast promiscous
41697 - */
41698 -int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
41699 - struct dev_mc_list *mc_list, u32 mc_count)
41700 -{
41701 -#define BE_MAX_MC 32 /* set mcast promisc if > 32 */
41702 - struct be_mcc_wrb *wrb;
41703 - struct be_cmd_req_mcast_mac_config *req;
41704 -
41705 - spin_lock_bh(&adapter->mcc_lock);
41706 -
41707 - wrb = wrb_from_mccq(adapter);
41708 - req = embedded_payload(wrb);
41709 -
41710 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41711 -
41712 + if (!wrb) {
41713 + status = -EBUSY;
41714 + goto err;
41715 + }
41716 + sge = nonembedded_sgl(wrb);
41717 + sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
41718 + sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
41719 + sge->len = cpu_to_le32(mem->size);
41720 + be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
41721 + OPCODE_COMMON_NTWK_RX_FILTER);
41722 +
41723 + memset(req, 0, sizeof(*req));
41724 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41725 - OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
41726 + OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));
41727
41728 - req->interface_id = if_id;
41729 - if (mc_list && mc_count <= BE_MAX_MC) {
41730 - int i;
41731 - struct dev_mc_list *mc;
41732 -
41733 - req->num_mac = cpu_to_le16(mc_count);
41734 -
41735 - for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
41736 - memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
41737 + req->if_id = cpu_to_le32(adapter->if_handle);
41738 + if (flags & IFF_PROMISC) {
41739 + req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
41740 + BE_IF_FLAGS_VLAN_PROMISCUOUS);
41741 + if (value == ON)
41742 + req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
41743 + BE_IF_FLAGS_VLAN_PROMISCUOUS);
41744 + } else if (flags & IFF_ALLMULTI) {
41745 + req->if_flags_mask = req->if_flags =
41746 + cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
41747 } else {
41748 - req->promiscuous = 1;
41749 - }
41750 + struct netdev_hw_addr *ha;
41751 + int i = 0;
41752
41753 - be_mcc_notify_wait(adapter);
41754 + req->if_flags_mask = req->if_flags =
41755 + cpu_to_le32(BE_IF_FLAGS_MULTICAST);
41756 + req->mcast_num = cpu_to_le16(netdev_mc_count(adapter->netdev));
41757 + netdev_for_each_mc_addr(ha, adapter->netdev)
41758 + memcpy(req->mcast_mac[i++].byte, ha->DMI_ADDR,
41759 + ETH_ALEN);
41760 + }
41761 + status = be_mcc_notify_wait(adapter);
41762
41763 +err:
41764 spin_unlock_bh(&adapter->mcc_lock);
41765 -
41766 - return 0;
41767 + return status;
41768 }
41769
41770 /* Uses synchrounous mcc */
41771 @@ -1033,9 +1596,14 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
41772 spin_lock_bh(&adapter->mcc_lock);
41773
41774 wrb = wrb_from_mccq(adapter);
41775 + if (!wrb) {
41776 + status = -EBUSY;
41777 + goto err;
41778 + }
41779 req = embedded_payload(wrb);
41780
41781 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41782 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41783 + OPCODE_COMMON_SET_FLOW_CONTROL);
41784
41785 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41786 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
41787 @@ -1045,6 +1613,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
41788
41789 status = be_mcc_notify_wait(adapter);
41790
41791 +err:
41792 spin_unlock_bh(&adapter->mcc_lock);
41793 return status;
41794 }
41795 @@ -1059,9 +1628,14 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
41796 spin_lock_bh(&adapter->mcc_lock);
41797
41798 wrb = wrb_from_mccq(adapter);
41799 + if (!wrb) {
41800 + status = -EBUSY;
41801 + goto err;
41802 + }
41803 req = embedded_payload(wrb);
41804
41805 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41806 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41807 + OPCODE_COMMON_GET_FLOW_CONTROL);
41808
41809 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41810 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
41811 @@ -1074,23 +1648,27 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
41812 *rx_fc = le16_to_cpu(resp->rx_flow_control);
41813 }
41814
41815 +err:
41816 spin_unlock_bh(&adapter->mcc_lock);
41817 return status;
41818 }
41819
41820 /* Uses mbox */
41821 -int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
41822 +int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
41823 + u32 *mode, u32 *function_caps)
41824 {
41825 struct be_mcc_wrb *wrb;
41826 struct be_cmd_req_query_fw_cfg *req;
41827 int status;
41828
41829 - spin_lock(&adapter->mbox_lock);
41830 + if (mutex_lock_interruptible(&adapter->mbox_lock))
41831 + return -1;
41832
41833 wrb = wrb_from_mbox(adapter);
41834 req = embedded_payload(wrb);
41835
41836 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41837 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41838 + OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
41839
41840 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41841 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
41842 @@ -1099,10 +1677,11 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
41843 if (!status) {
41844 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
41845 *port_num = le32_to_cpu(resp->phys_port);
41846 - *cap = le32_to_cpu(resp->function_cap);
41847 + *mode = le32_to_cpu(resp->function_mode);
41848 + *function_caps = le32_to_cpu(resp->function_caps);
41849 }
41850
41851 - spin_unlock(&adapter->mbox_lock);
41852 + mutex_unlock(&adapter->mbox_lock);
41853 return status;
41854 }
41855
41856 @@ -1113,19 +1692,161 @@ int be_cmd_reset_function(struct be_adapter *adapter)
41857 struct be_cmd_req_hdr *req;
41858 int status;
41859
41860 - spin_lock(&adapter->mbox_lock);
41861 + if (mutex_lock_interruptible(&adapter->mbox_lock))
41862 + return -1;
41863
41864 wrb = wrb_from_mbox(adapter);
41865 req = embedded_payload(wrb);
41866
41867 - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41868 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41869 + OPCODE_COMMON_FUNCTION_RESET);
41870
41871 be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
41872 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
41873
41874 status = be_mbox_notify_wait(adapter);
41875
41876 - spin_unlock(&adapter->mbox_lock);
41877 + mutex_unlock(&adapter->mbox_lock);
41878 + return status;
41879 +}
41880 +
41881 +int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
41882 +{
41883 + struct be_mcc_wrb *wrb;
41884 + struct be_cmd_req_rss_config *req;
41885 + u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
41886 + 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
41887 + int status;
41888 +
41889 + if (mutex_lock_interruptible(&adapter->mbox_lock))
41890 + return -1;
41891 +
41892 + wrb = wrb_from_mbox(adapter);
41893 + req = embedded_payload(wrb);
41894 +
41895 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41896 + OPCODE_ETH_RSS_CONFIG);
41897 +
41898 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
41899 + OPCODE_ETH_RSS_CONFIG, sizeof(*req));
41900 +
41901 + req->if_id = cpu_to_le32(adapter->if_handle);
41902 + req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
41903 + req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
41904 + memcpy(req->cpu_table, rsstable, table_size);
41905 + memcpy(req->hash, myhash, sizeof(myhash));
41906 + be_dws_cpu_to_le(req->hash, sizeof(req->hash));
41907 +
41908 + status = be_mbox_notify_wait(adapter);
41909 +
41910 + mutex_unlock(&adapter->mbox_lock);
41911 + return status;
41912 +}
41913 +
41914 +/* Uses sync mcc */
41915 +int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
41916 + u8 bcn, u8 sts, u8 state)
41917 +{
41918 + struct be_mcc_wrb *wrb;
41919 + struct be_cmd_req_enable_disable_beacon *req;
41920 + int status;
41921 +
41922 + spin_lock_bh(&adapter->mcc_lock);
41923 +
41924 + wrb = wrb_from_mccq(adapter);
41925 + if (!wrb) {
41926 + status = -EBUSY;
41927 + goto err;
41928 + }
41929 + req = embedded_payload(wrb);
41930 +
41931 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41932 + OPCODE_COMMON_ENABLE_DISABLE_BEACON);
41933 +
41934 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41935 + OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
41936 +
41937 + req->port_num = port_num;
41938 + req->beacon_state = state;
41939 + req->beacon_duration = bcn;
41940 + req->status_duration = sts;
41941 +
41942 + status = be_mcc_notify_wait(adapter);
41943 +
41944 +err:
41945 + spin_unlock_bh(&adapter->mcc_lock);
41946 + return status;
41947 +}
41948 +
41949 +/* Uses sync mcc */
41950 +int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
41951 +{
41952 + struct be_mcc_wrb *wrb;
41953 + struct be_cmd_req_get_beacon_state *req;
41954 + int status;
41955 +
41956 + spin_lock_bh(&adapter->mcc_lock);
41957 +
41958 + wrb = wrb_from_mccq(adapter);
41959 + if (!wrb) {
41960 + status = -EBUSY;
41961 + goto err;
41962 + }
41963 + req = embedded_payload(wrb);
41964 +
41965 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41966 + OPCODE_COMMON_GET_BEACON_STATE);
41967 +
41968 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41969 + OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
41970 +
41971 + req->port_num = port_num;
41972 +
41973 + status = be_mcc_notify_wait(adapter);
41974 + if (!status) {
41975 + struct be_cmd_resp_get_beacon_state *resp =
41976 + embedded_payload(wrb);
41977 + *state = resp->beacon_state;
41978 + }
41979 +
41980 +err:
41981 + spin_unlock_bh(&adapter->mcc_lock);
41982 + return status;
41983 +}
41984 +
41985 +/* Uses sync mcc */
41986 +int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
41987 + u8 *connector)
41988 +{
41989 + struct be_mcc_wrb *wrb;
41990 + struct be_cmd_req_port_type *req;
41991 + int status;
41992 +
41993 + spin_lock_bh(&adapter->mcc_lock);
41994 +
41995 + wrb = wrb_from_mccq(adapter);
41996 + if (!wrb) {
41997 + status = -EBUSY;
41998 + goto err;
41999 + }
42000 + req = embedded_payload(wrb);
42001 +
42002 + be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
42003 + OPCODE_COMMON_READ_TRANSRECV_DATA);
42004 +
42005 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42006 + OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
42007 +
42008 + req->port = cpu_to_le32(port);
42009 + req->page_num = cpu_to_le32(TR_PAGE_A0);
42010 + status = be_mcc_notify_wait(adapter);
42011 + if (!status) {
42012 + struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
42013 + *connector = resp->data.connector;
42014 + }
42015 +
42016 +err:
42017 + spin_unlock_bh(&adapter->mcc_lock);
42018 return status;
42019 }
42020
42021 @@ -1133,16 +1854,24 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
42022 u32 flash_type, u32 flash_opcode, u32 buf_size)
42023 {
42024 struct be_mcc_wrb *wrb;
42025 - struct be_cmd_write_flashrom *req = cmd->va;
42026 + struct be_cmd_write_flashrom *req;
42027 struct be_sge *sge;
42028 int status;
42029
42030 spin_lock_bh(&adapter->mcc_lock);
42031 + adapter->flash_status = 0;
42032
42033 wrb = wrb_from_mccq(adapter);
42034 + if (!wrb) {
42035 + status = -EBUSY;
42036 + goto err_unlock;
42037 + }
42038 + req = cmd->va;
42039 sge = nonembedded_sgl(wrb);
42040
42041 - be_wrb_hdr_prepare(wrb, cmd->size, false, 1);
42042 + be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
42043 + OPCODE_COMMON_WRITE_FLASHROM);
42044 + wrb->tag1 = CMD_SUBSYSTEM_COMMON;
42045
42046 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42047 OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
42048 @@ -1154,8 +1883,852 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
42049 req->params.op_code = cpu_to_le32(flash_opcode);
42050 req->params.data_buf_size = cpu_to_le32(buf_size);
42051
42052 + be_mcc_notify(adapter);
42053 + spin_unlock_bh(&adapter->mcc_lock);
42054 +
42055 + if (!wait_for_completion_timeout(&adapter->flash_compl,
42056 + msecs_to_jiffies(40000)))
42057 + status = -1;
42058 + else
42059 + status = adapter->flash_status;
42060 +
42061 + return status;
42062 +
42063 +err_unlock:
42064 + spin_unlock_bh(&adapter->mcc_lock);
42065 + return status;
42066 +}
42067 +
42068 +int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
42069 + int offset)
42070 +{
42071 + struct be_mcc_wrb *wrb;
42072 + struct be_cmd_write_flashrom *req;
42073 + int status;
42074 +
42075 + spin_lock_bh(&adapter->mcc_lock);
42076 +
42077 + wrb = wrb_from_mccq(adapter);
42078 + if (!wrb) {
42079 + status = -EBUSY;
42080 + goto err;
42081 + }
42082 + req = embedded_payload(wrb);
42083 +
42084 + be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
42085 + OPCODE_COMMON_READ_FLASHROM);
42086 +
42087 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42088 + OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
42089 +
42090 + req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
42091 + req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
42092 + req->params.offset = cpu_to_le32(offset);
42093 + req->params.data_buf_size = cpu_to_le32(0x4);
42094 +
42095 + status = be_mcc_notify_wait(adapter);
42096 + if (!status)
42097 + memcpy(flashed_crc, req->params.data_buf, 4);
42098 +
42099 +err:
42100 + spin_unlock_bh(&adapter->mcc_lock);
42101 + return status;
42102 +}
42103 +
42104 +int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
42105 + struct be_dma_mem *nonemb_cmd)
42106 +{
42107 + struct be_mcc_wrb *wrb;
42108 + struct be_cmd_req_acpi_wol_magic_config *req;
42109 + struct be_sge *sge;
42110 + int status;
42111 +
42112 + spin_lock_bh(&adapter->mcc_lock);
42113 +
42114 + wrb = wrb_from_mccq(adapter);
42115 + if (!wrb) {
42116 + status = -EBUSY;
42117 + goto err;
42118 + }
42119 + req = nonemb_cmd->va;
42120 + sge = nonembedded_sgl(wrb);
42121 +
42122 + be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
42123 + OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
42124 +
42125 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
42126 + OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
42127 + memcpy(req->magic_mac, mac, ETH_ALEN);
42128 +
42129 + sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
42130 + sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
42131 + sge->len = cpu_to_le32(nonemb_cmd->size);
42132 +
42133 + status = be_mcc_notify_wait(adapter);
42134 +
42135 +err:
42136 + spin_unlock_bh(&adapter->mcc_lock);
42137 + return status;
42138 +}
42139 +
42140 +int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
42141 + u8 loopback_type, u8 enable)
42142 +{
42143 + struct be_mcc_wrb *wrb;
42144 + struct be_cmd_req_set_lmode *req;
42145 + int status;
42146 +
42147 + spin_lock_bh(&adapter->mcc_lock);
42148 +
42149 + wrb = wrb_from_mccq(adapter);
42150 + if (!wrb) {
42151 + status = -EBUSY;
42152 + goto err;
42153 + }
42154 +
42155 + req = embedded_payload(wrb);
42156 +
42157 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42158 + OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
42159 +
42160 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
42161 + OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
42162 + sizeof(*req));
42163 +
42164 + req->src_port = port_num;
42165 + req->dest_port = port_num;
42166 + req->loopback_type = loopback_type;
42167 + req->loopback_state = enable;
42168 +
42169 + status = be_mcc_notify_wait(adapter);
42170 +err:
42171 + spin_unlock_bh(&adapter->mcc_lock);
42172 + return status;
42173 +}
42174 +
42175 +int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
42176 + u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
42177 +{
42178 + struct be_mcc_wrb *wrb;
42179 + struct be_cmd_req_loopback_test *req;
42180 + int status;
42181 +
42182 + spin_lock_bh(&adapter->mcc_lock);
42183 +
42184 + wrb = wrb_from_mccq(adapter);
42185 + if (!wrb) {
42186 + status = -EBUSY;
42187 + goto err;
42188 + }
42189 +
42190 + req = embedded_payload(wrb);
42191 +
42192 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42193 + OPCODE_LOWLEVEL_LOOPBACK_TEST);
42194 +
42195 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
42196 + OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
42197 + req->hdr.timeout = cpu_to_le32(4);
42198 +
42199 + req->pattern = cpu_to_le64(pattern);
42200 + req->src_port = cpu_to_le32(port_num);
42201 + req->dest_port = cpu_to_le32(port_num);
42202 + req->pkt_size = cpu_to_le32(pkt_size);
42203 + req->num_pkts = cpu_to_le32(num_pkts);
42204 + req->loopback_type = cpu_to_le32(loopback_type);
42205 +
42206 + status = be_mcc_notify_wait(adapter);
42207 + if (!status) {
42208 + struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
42209 + status = le32_to_cpu(resp->status);
42210 + }
42211 +
42212 +err:
42213 + spin_unlock_bh(&adapter->mcc_lock);
42214 + return status;
42215 +}
42216 +
42217 +int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
42218 + u32 byte_cnt, struct be_dma_mem *cmd)
42219 +{
42220 + struct be_mcc_wrb *wrb;
42221 + struct be_cmd_req_ddrdma_test *req;
42222 + struct be_sge *sge;
42223 + int status;
42224 + int i, j = 0;
42225 +
42226 + spin_lock_bh(&adapter->mcc_lock);
42227 +
42228 + wrb = wrb_from_mccq(adapter);
42229 + if (!wrb) {
42230 + status = -EBUSY;
42231 + goto err;
42232 + }
42233 + req = cmd->va;
42234 + sge = nonembedded_sgl(wrb);
42235 + be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
42236 + OPCODE_LOWLEVEL_HOST_DDR_DMA);
42237 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
42238 + OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
42239 +
42240 + sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
42241 + sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
42242 + sge->len = cpu_to_le32(cmd->size);
42243 +
42244 + req->pattern = cpu_to_le64(pattern);
42245 + req->byte_count = cpu_to_le32(byte_cnt);
42246 + for (i = 0; i < byte_cnt; i++) {
42247 + req->snd_buff[i] = (u8)(pattern >> (j*8));
42248 + j++;
42249 + if (j > 7)
42250 + j = 0;
42251 + }
42252 +
42253 + status = be_mcc_notify_wait(adapter);
42254 +
42255 + if (!status) {
42256 + struct be_cmd_resp_ddrdma_test *resp;
42257 + resp = cmd->va;
42258 + if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
42259 + resp->snd_err) {
42260 + status = -1;
42261 + }
42262 + }
42263 +
42264 +err:
42265 + spin_unlock_bh(&adapter->mcc_lock);
42266 + return status;
42267 +}
42268 +
42269 +int be_cmd_get_seeprom_data(struct be_adapter *adapter,
42270 + struct be_dma_mem *nonemb_cmd)
42271 +{
42272 + struct be_mcc_wrb *wrb;
42273 + struct be_cmd_req_seeprom_read *req;
42274 + struct be_sge *sge;
42275 + int status;
42276 +
42277 + spin_lock_bh(&adapter->mcc_lock);
42278 +
42279 + wrb = wrb_from_mccq(adapter);
42280 + req = nonemb_cmd->va;
42281 + sge = nonembedded_sgl(wrb);
42282 +
42283 + be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
42284 + OPCODE_COMMON_SEEPROM_READ);
42285 +
42286 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42287 + OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
42288 +
42289 + sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
42290 + sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
42291 + sge->len = cpu_to_le32(nonemb_cmd->size);
42292 +
42293 + status = be_mcc_notify_wait(adapter);
42294 +
42295 + spin_unlock_bh(&adapter->mcc_lock);
42296 + return status;
42297 +}
42298 +
42299 +int be_cmd_get_phy_info(struct be_adapter *adapter,
42300 + struct be_phy_info *phy_info)
42301 +{
42302 + struct be_mcc_wrb *wrb;
42303 + struct be_cmd_req_get_phy_info *req;
42304 + struct be_sge *sge;
42305 + struct be_dma_mem cmd;
42306 + struct be_phy_info *resp_phy_info;
42307 + int status;
42308 +
42309 + spin_lock_bh(&adapter->mcc_lock);
42310 + wrb = wrb_from_mccq(adapter);
42311 + if (!wrb) {
42312 + status = -EBUSY;
42313 + goto err;
42314 + }
42315 + cmd.size = sizeof(struct be_cmd_req_get_phy_info);
42316 + cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
42317 + &cmd.dma);
42318 + if (!cmd.va) {
42319 + dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
42320 + status = -ENOMEM;
42321 + goto err;
42322 + }
42323 +
42324 + req = cmd.va;
42325 + sge = nonembedded_sgl(wrb);
42326 +
42327 + be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
42328 + OPCODE_COMMON_GET_PHY_DETAILS);
42329 +
42330 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42331 + OPCODE_COMMON_GET_PHY_DETAILS,
42332 + sizeof(*req));
42333 +
42334 + sge->pa_hi = cpu_to_le32(upper_32_bits(cmd.dma));
42335 + sge->pa_lo = cpu_to_le32(cmd.dma & 0xFFFFFFFF);
42336 + sge->len = cpu_to_le32(cmd.size);
42337 +
42338 + status = be_mcc_notify_wait(adapter);
42339 + if (!status) {
42340 + resp_phy_info = cmd.va + sizeof(struct be_cmd_req_hdr);
42341 + phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
42342 + phy_info->interface_type =
42343 + le16_to_cpu(resp_phy_info->interface_type);
42344 + phy_info->auto_speeds_supported =
42345 + le16_to_cpu(resp_phy_info->auto_speeds_supported);
42346 + phy_info->fixed_speeds_supported =
42347 + le16_to_cpu(resp_phy_info->fixed_speeds_supported);
42348 + phy_info->misc_params =
42349 + le32_to_cpu(resp_phy_info->misc_params);
42350 + }
42351 + pci_free_consistent(adapter->pdev, cmd.size,
42352 + cmd.va, cmd.dma);
42353 +err:
42354 + spin_unlock_bh(&adapter->mcc_lock);
42355 + return status;
42356 +}
42357 +
42358 +int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
42359 +{
42360 + struct be_mcc_wrb *wrb;
42361 + struct be_cmd_req_set_qos *req;
42362 + int status;
42363 +
42364 + spin_lock_bh(&adapter->mcc_lock);
42365 +
42366 + wrb = wrb_from_mccq(adapter);
42367 + if (!wrb) {
42368 + status = -EBUSY;
42369 + goto err;
42370 + }
42371 +
42372 + req = embedded_payload(wrb);
42373 +
42374 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42375 + OPCODE_COMMON_SET_QOS);
42376 +
42377 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42378 + OPCODE_COMMON_SET_QOS, sizeof(*req));
42379 +
42380 + req->hdr.domain = domain;
42381 + req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
42382 + req->max_bps_nic = cpu_to_le32(bps);
42383 +
42384 + status = be_mcc_notify_wait(adapter);
42385 +err:
42386 + spin_unlock_bh(&adapter->mcc_lock);
42387 + return status;
42388 +}
42389 +
42390 +int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
42391 +{
42392 + struct be_mcc_wrb *wrb;
42393 + struct be_cmd_req_cntl_attribs *req;
42394 + struct be_cmd_resp_cntl_attribs *resp;
42395 + struct be_sge *sge;
42396 + int status;
42397 + int payload_len = max(sizeof(*req), sizeof(*resp));
42398 + struct mgmt_controller_attrib *attribs;
42399 + struct be_dma_mem attribs_cmd;
42400 +
42401 + memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
42402 + attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
42403 + attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
42404 + &attribs_cmd.dma);
42405 + if (!attribs_cmd.va) {
42406 + dev_err(&adapter->pdev->dev,
42407 + "Memory allocation failure\n");
42408 + return -ENOMEM;
42409 + }
42410 +
42411 + if (mutex_lock_interruptible(&adapter->mbox_lock))
42412 + return -1;
42413 +
42414 + wrb = wrb_from_mbox(adapter);
42415 + if (!wrb) {
42416 + status = -EBUSY;
42417 + goto err;
42418 + }
42419 + req = attribs_cmd.va;
42420 + sge = nonembedded_sgl(wrb);
42421 +
42422 + be_wrb_hdr_prepare(wrb, payload_len, false, 1,
42423 + OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
42424 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42425 + OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
42426 + sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
42427 + sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
42428 + sge->len = cpu_to_le32(attribs_cmd.size);
42429 +
42430 + status = be_mbox_notify_wait(adapter);
42431 + if (!status) {
42432 + attribs = (struct mgmt_controller_attrib *)(attribs_cmd.va +
42433 + sizeof(struct be_cmd_resp_hdr));
42434 + adapter->hba_port_num = attribs->hba_attribs.phy_port;
42435 + strncpy(adapter->model_number,
42436 + attribs->hba_attribs.controller_model_number, 31);
42437 + }
42438 +
42439 +err:
42440 + mutex_unlock(&adapter->mbox_lock);
42441 + pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
42442 + attribs_cmd.dma);
42443 + return status;
42444 +}
42445 +
42446 +/* Uses mbox */
42447 +int be_cmd_req_native_mode(struct be_adapter *adapter)
42448 +{
42449 + struct be_mcc_wrb *wrb;
42450 + struct be_cmd_req_set_func_cap *req;
42451 + int status;
42452 +
42453 + if (mutex_lock_interruptible(&adapter->mbox_lock))
42454 + return -1;
42455 +
42456 + wrb = wrb_from_mbox(adapter);
42457 + if (!wrb) {
42458 + status = -EBUSY;
42459 + goto err;
42460 + }
42461 +
42462 + req = embedded_payload(wrb);
42463 +
42464 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42465 + OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
42466 +
42467 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42468 + OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
42469 +
42470 + req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
42471 + CAPABILITY_BE3_NATIVE_ERX_API);
42472 + req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
42473 +
42474 + status = be_mbox_notify_wait(adapter);
42475 + if (!status) {
42476 + struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
42477 + adapter->be3_native = le32_to_cpu(resp->cap_flags) &
42478 + CAPABILITY_BE3_NATIVE_ERX_API;
42479 + }
42480 +err:
42481 + mutex_unlock(&adapter->mbox_lock);
42482 + return status;
42483 +}
42484 +
42485 +static void encode_port_names(struct be_adapter *adapter)
42486 +{
42487 + switch (adapter->port_name[adapter->hba_port_num]) {
42488 + case '0':
42489 + adapter->port_name[adapter->hba_port_num] = 0;
42490 + break;
42491 + case '1':
42492 + adapter->port_name[adapter->hba_port_num] = 1;
42493 + break;
42494 + case '2':
42495 + adapter->port_name[adapter->hba_port_num] = 2;
42496 + break;
42497 + case '3':
42498 + adapter->port_name[adapter->hba_port_num] = 3;
42499 + break;
42500 + case '4':
42501 + adapter->port_name[adapter->hba_port_num] = 4;
42502 + break;
42503 + case 'A':
42504 + adapter->port_name[adapter->hba_port_num] = 5;
42505 + break;
42506 + case 'B':
42507 + adapter->port_name[adapter->hba_port_num] = 6;
42508 + break;
42509 + case 'C':
42510 + adapter->port_name[adapter->hba_port_num] = 7;
42511 + break;
42512 + case 'D':
42513 + adapter->port_name[adapter->hba_port_num] = 8;
42514 + break;
42515 + }
42516 +}
42517 +
42518 +int be_cmd_query_port_names_v0(struct be_adapter *adapter, u8 *port_name)
42519 +{
42520 + struct be_mcc_wrb *wrb;
42521 + struct be_cmd_req_get_port_name *req;
42522 + int status;
42523 +
42524 + spin_lock_bh(&adapter->mcc_lock);
42525 +
42526 + wrb = wrb_from_mccq(adapter);
42527 + if (!wrb) {
42528 + status = -EBUSY;
42529 + goto err;
42530 + }
42531 +
42532 + req = embedded_payload(wrb);
42533 +
42534 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42535 + OPCODE_COMMON_GET_PORT_NAME);
42536 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42537 + OPCODE_COMMON_GET_PORT_NAME, sizeof(*req));
42538 +
42539 + status = be_mcc_notify_wait(adapter);
42540 + if (!status) {
42541 + struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
42542 + port_name[0] = resp->port0_name;
42543 + port_name[1] = resp->port1_name;
42544 + }
42545 +
42546 +err:
42547 + spin_unlock_bh(&adapter->mcc_lock);
42548 +
42549 + if(!status)
42550 + encode_port_names(adapter);
42551 + return status;
42552 +}
42553 +
42554 +int be_cmd_query_port_names_v1(struct be_adapter *adapter, u8 *port_name)
42555 +{
42556 + struct be_mcc_wrb *wrb;
42557 + struct be_cmd_req_get_port_name *req;
42558 + int status;
42559 +
42560 + spin_lock_bh(&adapter->mcc_lock);
42561 +
42562 + wrb = wrb_from_mccq(adapter);
42563 + if (!wrb) {
42564 + status = -EBUSY;
42565 + goto err;
42566 + }
42567 + req = embedded_payload(wrb);
42568 +
42569 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42570 + OPCODE_COMMON_GET_PORT_NAME);
42571 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42572 + OPCODE_COMMON_GET_PORT_NAME, sizeof(*req));
42573 + req->hdr.version = 1;
42574 +
42575 status = be_mcc_notify_wait(adapter);
42576 + if (!status) {
42577 + struct be_cmd_resp_get_port_name_v1 *resp = embedded_payload(wrb);
42578 + port_name[0] = resp->port0_name;
42579 + port_name[1] = resp->port1_name;
42580 + port_name[2] = resp->port2_name;
42581 + port_name[3] = resp->port3_name;
42582 + }
42583 +
42584 +err:
42585 + spin_unlock_bh(&adapter->mcc_lock);
42586 +
42587 + if (!status)
42588 + encode_port_names(adapter);
42589 + return status;
42590 +}
42591 +
42592 +int be_cmd_req_pg_pfc(struct be_adapter *adapter, int *fw_num_txqs)
42593 +{
42594 + struct be_mcc_wrb *wrb;
42595 + struct be_cmd_req_pg *req;
42596 + int status, num = 0;
42597 + bool query = true;
42598 +
42599 + *fw_num_txqs = MAX_TX_QS;
42600 +
42601 + if (mutex_lock_interruptible(&adapter->mbox_lock))
42602 + return -1;
42603 +
42604 +enable_pfc:
42605 + wrb = wrb_from_mbox(adapter);
42606 + req = embedded_payload(wrb);
42607 +
42608 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42609 + OPCODE_ETH_PG_FEATURE_QUERY_REQUEST);
42610 +
42611 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
42612 + OPCODE_ETH_PG_FEATURE_QUERY_REQUEST, sizeof(*req));
42613 +
42614 + if (query)
42615 + req->query |= cpu_to_le32(REQ_PG_QUERY);
42616 + req->pfc_pg |= cpu_to_le32(REQ_PG_FEAT);
42617 +
42618 + status = be_mbox_notify_wait(adapter);
42619 + if (!status) {
42620 + struct be_cmd_resp_pg *resp = embedded_payload(wrb);
42621 + if (query) {
42622 + if (le32_to_cpu(resp->pfc_pg) & REQ_PG_FEAT) {
42623 + num = le32_to_cpu(resp->num_tx_rings);
42624 + query = false;
42625 + goto enable_pfc;
42626 + }
42627 + } else {
42628 + adapter->flags |= BE_FLAGS_DCBX;
42629 + *fw_num_txqs = num;
42630 + }
42631 + }
42632 +
42633 + mutex_unlock(&adapter->mbox_lock);
42634 + return status;
42635 +}
42636 +
42637 +/* Set privilege(s) for a function */
42638 +int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 mask, u32 *prev,
42639 + u32 domain)
42640 +{
42641 + struct be_mcc_wrb *wrb;
42642 + struct be_cmd_req_set_fn_privileges *req;
42643 + int status;
42644 +
42645 + spin_lock_bh(&adapter->mcc_lock);
42646 +
42647 + wrb = wrb_from_mccq(adapter);
42648 + if (!wrb) {
42649 + status = -EBUSY;
42650 + goto err;
42651 + }
42652 +
42653 + req = embedded_payload(wrb);
42654 +
42655 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42656 + OPCODE_COMMON_SET_FN_PRIVILEGES);
42657 +
42658 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42659 + OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req));
42660 +
42661 + req->hdr.domain = domain;
42662 + req->privilege_mask = cpu_to_le32(mask);
42663 +
42664 + status = be_mcc_notify_wait(adapter);
42665 +
42666 +err:
42667 + spin_unlock_bh(&adapter->mcc_lock);
42668 + return status;
42669 +}
42670 +
42671 +/* Get privilege(s) for a function */
42672 +int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
42673 + u32 domain)
42674 +{
42675 + struct be_mcc_wrb *wrb;
42676 + struct be_cmd_req_get_fn_privileges *req;
42677 + int status;
42678 +
42679 + spin_lock_bh(&adapter->mcc_lock);
42680 +
42681 + wrb = wrb_from_mccq(adapter);
42682 + if (!wrb) {
42683 + status = -EBUSY;
42684 + goto err;
42685 + }
42686 +
42687 + req = embedded_payload(wrb);
42688 +
42689 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42690 + OPCODE_COMMON_GET_FN_PRIVILEGES);
42691
42692 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42693 + OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req));
42694 +
42695 + req->hdr.domain = domain;
42696 +
42697 + status = be_mcc_notify_wait(adapter);
42698 + if (!status) {
42699 + struct be_cmd_resp_get_fn_privileges *resp =
42700 + embedded_payload(wrb);
42701 + *privilege = le32_to_cpu(resp->privilege_mask);
42702 + } else
42703 + *privilege = 0;
42704 +
42705 +err:
42706 + spin_unlock_bh(&adapter->mcc_lock);
42707 + return status;
42708 +}
42709 +
42710 +/* Set Hyper switch config */
42711 +int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
42712 + u32 domain, u16 intf_id)
42713 +{
42714 + struct be_mcc_wrb *wrb;
42715 + struct be_cmd_req_set_hsw_config *req;
42716 + void *ctxt;
42717 + int status;
42718 +
42719 + spin_lock_bh(&adapter->mcc_lock);
42720 +
42721 + wrb = wrb_from_mccq(adapter);
42722 + if (!wrb) {
42723 + status = -EBUSY;
42724 + goto err;
42725 + }
42726 +
42727 + req = embedded_payload(wrb);
42728 + ctxt = &req->context;
42729 +
42730 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42731 + OPCODE_COMMON_SET_HSW_CONFIG);
42732 +
42733 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42734 + OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req));
42735 +
42736 + req->hdr.domain = domain;
42737 + AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
42738 + if (pvid) {
42739 + AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
42740 + AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
42741 + }
42742 +
42743 + be_dws_cpu_to_le(req->context, sizeof(req->context));
42744 + status = be_mcc_notify_wait(adapter);
42745 +
42746 +err:
42747 + spin_unlock_bh(&adapter->mcc_lock);
42748 + return status;
42749 +}
42750 +
42751 +/* Get Hyper switch config */
42752 +int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
42753 + u32 domain, u16 intf_id)
42754 +{
42755 + struct be_mcc_wrb *wrb;
42756 + struct be_cmd_req_get_hsw_config *req;
42757 + void *ctxt;
42758 + int status;
42759 + u16 vid;
42760 +
42761 + spin_lock_bh(&adapter->mcc_lock);
42762 +
42763 + wrb = wrb_from_mccq(adapter);
42764 + if (!wrb) {
42765 + status = -EBUSY;
42766 + goto err;
42767 + }
42768 +
42769 + req = embedded_payload(wrb);
42770 + ctxt = &req->context;
42771 +
42772 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42773 + OPCODE_COMMON_GET_HSW_CONFIG);
42774 +
42775 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42776 + OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req));
42777 +
42778 + req->hdr.domain = domain;
42779 + AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
42780 + intf_id);
42781 + AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
42782 + be_dws_cpu_to_le(req->context, sizeof(req->context));
42783 +
42784 + status = be_mcc_notify_wait(adapter);
42785 + if (!status) {
42786 + struct be_cmd_resp_get_hsw_config *resp =
42787 + embedded_payload(wrb);
42788 + be_dws_le_to_cpu(&resp->context,
42789 + sizeof(resp->context));
42790 + vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
42791 + pvid, &resp->context);
42792 + *pvid = le16_to_cpu(vid);
42793 + }
42794 +
42795 +err:
42796 + spin_unlock_bh(&adapter->mcc_lock);
42797 + return status;
42798 +}
42799 +
42800 +int be_cmd_get_port_speed(struct be_adapter *adapter,
42801 + u8 port_num, u16 *dac_cable_len, u16 *port_speed)
42802 +{
42803 + struct be_mcc_wrb *wrb;
42804 + struct be_cmd_req_get_port_speed *req;
42805 + int status = 0;
42806 +
42807 + spin_lock_bh(&adapter->mcc_lock);
42808 +
42809 + wrb = wrb_from_mccq(adapter);
42810 + if (!wrb) {
42811 + status = -EBUSY;
42812 + goto err;
42813 + }
42814 +
42815 + req = embedded_payload(wrb);
42816 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42817 + OPCODE_COMMON_NTWK_GET_LINK_SPEED);
42818 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42819 + OPCODE_COMMON_NTWK_GET_LINK_SPEED,
42820 + sizeof(*req));
42821 + req->port_num = port_num;
42822 + status = be_mcc_notify_wait(adapter);
42823 + if (!status) {
42824 + struct be_cmd_resp_get_port_speed *resp =
42825 + embedded_payload(wrb);
42826 + *dac_cable_len = resp->dac_cable_length;
42827 + *port_speed = resp->mac_speed;
42828 + }
42829 +
42830 +err:
42831 + spin_unlock_bh(&adapter->mcc_lock);
42832 + return status;
42833 +}
42834 +
42835 +int be_cmd_set_port_speed_v1(struct be_adapter *adapter,
42836 + u8 port_num, u16 mac_speed,
42837 + u16 dac_cable_len)
42838 +{
42839 + struct be_mcc_wrb *wrb;
42840 + struct be_cmd_req_set_port_speed_v1 *req;
42841 + int status = 0;
42842 +
42843 + spin_lock_bh(&adapter->mcc_lock);
42844 +
42845 + wrb = wrb_from_mccq(adapter);
42846 + if (!wrb) {
42847 + status = -EBUSY;
42848 + goto err;
42849 + }
42850 + req = embedded_payload(wrb);
42851 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42852 + OPCODE_COMMON_NTWK_SET_LINK_SPEED);
42853 +
42854 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42855 + OPCODE_COMMON_NTWK_SET_LINK_SPEED,
42856 + sizeof(*req));
42857 + req->hdr.version=1;
42858 +
42859 + req->port_num = port_num;
42860 + req->virt_port = port_num;
42861 + req->mac_speed = mac_speed;
42862 + req->dac_cable_length = dac_cable_len;
42863 + status = be_mcc_notify_wait(adapter);
42864 +err:
42865 + spin_unlock_bh(&adapter->mcc_lock);
42866 + return status;
42867 +}
42868 +
42869 +
42870 +/* Uses sync mcc */
42871 +#ifdef CONFIG_PALAU
42872 +int be_cmd_pass_ext_ioctl(struct be_adapter *adapter, dma_addr_t dma,
42873 + int req_size, void *va)
42874 +{
42875 + struct be_mcc_wrb *wrb;
42876 + struct be_sge *sge;
42877 + int status;
42878 + struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) va;
42879 +
42880 + spin_lock_bh(&adapter->mcc_lock);
42881 +
42882 + wrb = wrb_from_mccq(adapter);
42883 + if (!wrb) {
42884 + status = -EBUSY;
42885 + goto err;
42886 + }
42887 + sge = nonembedded_sgl(wrb);
42888 +
42889 + be_wrb_hdr_prepare(wrb, req_size, false, 1, hdr->opcode);
42890 + wrb->tag1 = MCC_WRB_PASS_THRU;
42891 + sge->pa_hi = cpu_to_le32(upper_32_bits(dma));
42892 + sge->pa_lo = cpu_to_le32(dma & 0xFFFFFFFF);
42893 + sge->len = cpu_to_le32(req_size);
42894 +
42895 + status = be_mcc_notify_wait(adapter);
42896 +err:
42897 spin_unlock_bh(&adapter->mcc_lock);
42898 return status;
42899 }
42900 +#endif
42901 diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
42902 index ad33d55..35aa5c7 100644
42903 --- a/drivers/net/benet/be_cmds.h
42904 +++ b/drivers/net/benet/be_cmds.h
42905 @@ -1,20 +1,23 @@
42906 /*
42907 - * Copyright (C) 2005 - 2009 ServerEngines
42908 + * Copyright (C) 2005 - 2011 Emulex
42909 * All rights reserved.
42910 *
42911 * This program is free software; you can redistribute it and/or
42912 * modify it under the terms of the GNU General Public License version 2
42913 - * as published by the Free Software Foundation. The full GNU General
42914 + * as published by the Free Software Foundation. The full GNU General
42915 * Public License is included in this distribution in the file called COPYING.
42916 *
42917 * Contact Information:
42918 - * linux-drivers@serverengines.com
42919 + * linux-drivers@emulex.com
42920 *
42921 - * ServerEngines
42922 - * 209 N. Fair Oaks Ave
42923 - * Sunnyvale, CA 94085
42924 + * Emulex
42925 + * 3333 Susan Street
42926 + * Costa Mesa, CA 92626
42927 */
42928
42929 +#ifndef BE_CMDS_H
42930 +#define BE_CMDS_H
42931 +
42932 /*
42933 * The driver sends configuration and managements command requests to the
42934 * firmware in the BE. These requests are communicated to the processor
42935 @@ -29,9 +32,10 @@ struct be_sge {
42936 u32 len;
42937 };
42938
42939 -#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/
42940 +#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/
42941 #define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
42942 #define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
42943 +#define MCC_WRB_PASS_THRU 0xFF /* this wrb is used for pass thru cmd */
42944 struct be_mcc_wrb {
42945 u32 embedded; /* dword 0 */
42946 u32 payload_length; /* dword 1 */
42947 @@ -44,24 +48,19 @@ struct be_mcc_wrb {
42948 } payload;
42949 };
42950
42951 -#define CQE_FLAGS_VALID_MASK (1 << 31)
42952 -#define CQE_FLAGS_ASYNC_MASK (1 << 30)
42953 -#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
42954 -#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
42955 +#define CQE_FLAGS_VALID_MASK (1 << 31)
42956 +#define CQE_FLAGS_ASYNC_MASK (1 << 30)
42957 +#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
42958 +#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
42959
42960 /* Completion Status */
42961 enum {
42962 - MCC_STATUS_SUCCESS = 0x0,
42963 -/* The client does not have sufficient privileges to execute the command */
42964 - MCC_STATUS_INSUFFICIENT_PRIVILEGES = 0x1,
42965 -/* A parameter in the command was invalid. */
42966 - MCC_STATUS_INVALID_PARAMETER = 0x2,
42967 -/* There are insufficient chip resources to execute the command */
42968 - MCC_STATUS_INSUFFICIENT_RESOURCES = 0x3,
42969 -/* The command is completing because the queue was getting flushed */
42970 - MCC_STATUS_QUEUE_FLUSHING = 0x4,
42971 -/* The command is completing with a DMA error */
42972 - MCC_STATUS_DMA_FAILED = 0x5,
42973 + MCC_STATUS_SUCCESS = 0,
42974 + MCC_STATUS_FAILED = 1,
42975 + MCC_STATUS_ILLEGAL_REQUEST = 2,
42976 + MCC_STATUS_ILLEGAL_FIELD = 3,
42977 + MCC_STATUS_INSUFFICIENT_BUFFER = 4,
42978 + MCC_STATUS_UNAUTHORIZED_REQUEST = 5,
42979 MCC_STATUS_NOT_SUPPORTED = 66
42980 };
42981
42982 @@ -81,15 +80,24 @@ struct be_mcc_compl {
42983 * mcc_compl is interpreted as follows:
42984 */
42985 #define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
42986 +#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16 /* bits 16 - 23 */
42987 #define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
42988 +#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF
42989 #define ASYNC_EVENT_CODE_LINK_STATE 0x1
42990 +#define ASYNC_EVENT_CODE_GRP_5 0x5
42991 +#define ASYNC_EVENT_QOS_SPEED 0x1
42992 +#define ASYNC_EVENT_COS_PRIORITY 0x2
42993 +#define ASYNC_EVENT_PVID_STATE 0x3
42994 +#define GRP5_TYPE_PRIO_TC_MAP 4
42995 +
42996 struct be_async_event_trailer {
42997 u32 code;
42998 };
42999
43000 enum {
43001 - ASYNC_EVENT_LINK_DOWN = 0x0,
43002 - ASYNC_EVENT_LINK_UP = 0x1
43003 + ASYNC_EVENT_LINK_DOWN = 0x0,
43004 + ASYNC_EVENT_LINK_UP = 0x1,
43005 + ASYNC_EVENT_LOGICAL = 0x2
43006 };
43007
43008 /* When the event code of an async trailer is link-state, the mcc_compl
43009 @@ -101,7 +109,51 @@ struct be_async_event_link_state {
43010 u8 port_duplex;
43011 u8 port_speed;
43012 u8 port_fault;
43013 - u8 rsvd0[7];
43014 + u8 rsvd0;
43015 + u16 qos_link_speed;
43016 + u32 event_tag;
43017 + struct be_async_event_trailer trailer;
43018 +} __packed;
43019 +
43020 +/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED
43021 + * the mcc_compl must be interpreted as follows
43022 + */
43023 +struct be_async_event_grp5_qos_link_speed {
43024 + u8 physical_port;
43025 + u8 rsvd[5];
43026 + u16 qos_link_speed;
43027 + u32 event_tag;
43028 + struct be_async_event_trailer trailer;
43029 +} __packed;
43030 +
43031 +/* When the event code of an async trailer is GRP5 and event type is
43032 + * CoS-Priority, the mcc_compl must be interpreted as follows
43033 + */
43034 +struct be_async_event_grp5_cos_priority {
43035 + u8 physical_port;
43036 + u8 available_priority_bmap;
43037 + u8 reco_default_priority;
43038 + u8 valid;
43039 + u8 rsvd0;
43040 + u8 event_tag;
43041 + struct be_async_event_trailer trailer;
43042 +} __packed;
43043 +
43044 +/* When the event code of an async trailer is GRP5 and event type is
43045 + * PVID state, the mcc_compl must be interpreted as follows
43046 + */
43047 +struct be_async_event_grp5_pvid_state {
43048 + u8 enabled;
43049 + u8 rsvd0;
43050 + u16 tag;
43051 + u32 event_tag;
43052 + u32 rsvd1;
43053 + struct be_async_event_trailer trailer;
43054 +} __packed;
43055 +
43056 +/* GRP5 prio-tc-map event */
43057 +struct be_async_event_grp5_prio_tc_map {
43058 + u8 prio_tc_map[8]; /* map[prio] -> tc_id */
43059 struct be_async_event_trailer trailer;
43060 } __packed;
43061
43062 @@ -111,41 +163,68 @@ struct be_mcc_mailbox {
43063 };
43064
43065 #define CMD_SUBSYSTEM_COMMON 0x1
43066 -#define CMD_SUBSYSTEM_ETH 0x3
43067 +#define CMD_SUBSYSTEM_ETH 0x3
43068 +#define CMD_SUBSYSTEM_LOWLEVEL 0xb
43069
43070 #define OPCODE_COMMON_NTWK_MAC_QUERY 1
43071 #define OPCODE_COMMON_NTWK_MAC_SET 2
43072 #define OPCODE_COMMON_NTWK_MULTICAST_SET 3
43073 -#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
43074 +#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
43075 #define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
43076 +#define OPCODE_COMMON_READ_FLASHROM 6
43077 #define OPCODE_COMMON_WRITE_FLASHROM 7
43078 #define OPCODE_COMMON_CQ_CREATE 12
43079 #define OPCODE_COMMON_EQ_CREATE 13
43080 -#define OPCODE_COMMON_MCC_CREATE 21
43081 -#define OPCODE_COMMON_NTWK_RX_FILTER 34
43082 +#define OPCODE_COMMON_MCC_CREATE 21
43083 +#define OPCODE_COMMON_SET_QOS 28
43084 +#define OPCODE_COMMON_MCC_CREATE_EXT 90
43085 +#define OPCODE_COMMON_SEEPROM_READ 30
43086 +#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
43087 +#define OPCODE_COMMON_NTWK_RX_FILTER 34
43088 #define OPCODE_COMMON_GET_FW_VERSION 35
43089 #define OPCODE_COMMON_SET_FLOW_CONTROL 36
43090 #define OPCODE_COMMON_GET_FLOW_CONTROL 37
43091 #define OPCODE_COMMON_SET_FRAME_SIZE 39
43092 #define OPCODE_COMMON_MODIFY_EQ_DELAY 41
43093 #define OPCODE_COMMON_FIRMWARE_CONFIG 42
43094 -#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
43095 -#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
43096 -#define OPCODE_COMMON_MCC_DESTROY 53
43097 -#define OPCODE_COMMON_CQ_DESTROY 54
43098 -#define OPCODE_COMMON_EQ_DESTROY 55
43099 +#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
43100 +#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
43101 +#define OPCODE_COMMON_MCC_DESTROY 53
43102 +#define OPCODE_COMMON_CQ_DESTROY 54
43103 +#define OPCODE_COMMON_EQ_DESTROY 55
43104 +#define OPCODE_COMMON_NTWK_SET_LINK_SPEED 57
43105 #define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
43106 #define OPCODE_COMMON_NTWK_PMAC_ADD 59
43107 #define OPCODE_COMMON_NTWK_PMAC_DEL 60
43108 #define OPCODE_COMMON_FUNCTION_RESET 61
43109 +#define OPCODE_COMMON_MANAGE_FAT 68
43110 +#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
43111 +#define OPCODE_COMMON_GET_BEACON_STATE 70
43112 +#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
43113 +#define OPCODE_COMMON_GET_PORT_NAME 77
43114 +#define OPCODE_COMMON_SET_FN_PRIVILEGES 100
43115 +#define OPCODE_COMMON_GET_PHY_DETAILS 102
43116 +#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
43117 +#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
43118 +#define OPCODE_COMMON_NTWK_GET_LINK_SPEED 134
43119 +#define OPCODE_COMMON_GET_HSW_CONFIG 152
43120 +#define OPCODE_COMMON_SET_HSW_CONFIG 153
43121 +#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
43122
43123 +#define OPCODE_ETH_RSS_CONFIG 1
43124 #define OPCODE_ETH_ACPI_CONFIG 2
43125 #define OPCODE_ETH_PROMISCUOUS 3
43126 #define OPCODE_ETH_GET_STATISTICS 4
43127 #define OPCODE_ETH_TX_CREATE 7
43128 -#define OPCODE_ETH_RX_CREATE 8
43129 -#define OPCODE_ETH_TX_DESTROY 9
43130 -#define OPCODE_ETH_RX_DESTROY 10
43131 +#define OPCODE_ETH_RX_CREATE 8
43132 +#define OPCODE_ETH_TX_DESTROY 9
43133 +#define OPCODE_ETH_RX_DESTROY 10
43134 +#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG 12
43135 +#define OPCODE_ETH_PG_FEATURE_QUERY_REQUEST 23
43136 +
43137 +#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
43138 +#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
43139 +#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
43140
43141 struct be_cmd_req_hdr {
43142 u8 opcode; /* dword 0 */
43143 @@ -159,7 +238,7 @@ struct be_cmd_req_hdr {
43144 };
43145
43146 #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
43147 -#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
43148 +#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
43149 struct be_cmd_resp_hdr {
43150 u32 info; /* dword 0 */
43151 u32 status; /* dword 1 */
43152 @@ -265,7 +344,7 @@ struct be_cmd_req_pmac_del {
43153 /******************** Create CQ ***************************/
43154 /* Pseudo amap definition in which each bit of the actual structure is defined
43155 * as a byte: used to calculate offset/shift/mask of each field */
43156 -struct amap_cq_context {
43157 +struct amap_cq_context_be {
43158 u8 cidx[11]; /* dword 0*/
43159 u8 rsvd0; /* dword 0*/
43160 u8 coalescwm[2]; /* dword 0*/
43161 @@ -288,11 +367,28 @@ struct amap_cq_context {
43162 u8 rsvd5[32]; /* dword 3*/
43163 } __packed;
43164
43165 +struct amap_cq_context_lancer {
43166 + u8 rsvd0[12]; /* dword 0*/
43167 + u8 coalescwm[2]; /* dword 0*/
43168 + u8 nodelay; /* dword 0*/
43169 + u8 rsvd1[12]; /* dword 0*/
43170 + u8 count[2]; /* dword 0*/
43171 + u8 valid; /* dword 0*/
43172 + u8 rsvd2; /* dword 0*/
43173 + u8 eventable; /* dword 0*/
43174 + u8 eqid[16]; /* dword 1*/
43175 + u8 rsvd3[15]; /* dword 1*/
43176 + u8 armed; /* dword 1*/
43177 + u8 rsvd4[32]; /* dword 2*/
43178 + u8 rsvd5[32]; /* dword 3*/
43179 +} __packed;
43180 +
43181 struct be_cmd_req_cq_create {
43182 struct be_cmd_req_hdr hdr;
43183 u16 num_pages;
43184 - u16 rsvd0;
43185 - u8 context[sizeof(struct amap_cq_context) / 8];
43186 + u8 page_size;
43187 + u8 rsvd0;
43188 + u8 context[sizeof(struct amap_cq_context_be) / 8];
43189 struct phys_addr pages[8];
43190 } __packed;
43191
43192 @@ -302,10 +398,28 @@ struct be_cmd_resp_cq_create {
43193 u16 rsvd0;
43194 } __packed;
43195
43196 +struct be_cmd_req_get_fat {
43197 + struct be_cmd_req_hdr hdr;
43198 + u32 fat_operation;
43199 + u32 read_log_offset;
43200 + u32 read_log_length;
43201 + u32 data_buffer_size;
43202 + u32 data_buffer[1];
43203 +} __packed;
43204 +
43205 +struct be_cmd_resp_get_fat {
43206 + struct be_cmd_resp_hdr hdr;
43207 + u32 log_size;
43208 + u32 read_log_length;
43209 + u32 rsvd[2];
43210 + u32 data_buffer[1];
43211 +} __packed;
43212 +
43213 +
43214 /******************** Create MCCQ ***************************/
43215 /* Pseudo amap definition in which each bit of the actual structure is defined
43216 * as a byte: used to calculate offset/shift/mask of each field */
43217 -struct amap_mcc_context {
43218 +struct amap_mcc_context_be {
43219 u8 con_index[14];
43220 u8 rsvd0[2];
43221 u8 ring_size[4];
43222 @@ -320,11 +434,31 @@ struct amap_mcc_context {
43223 u8 rsvd2[32];
43224 } __packed;
43225
43226 +struct amap_mcc_context_lancer {
43227 + u8 async_cq_id[16];
43228 + u8 ring_size[4];
43229 + u8 rsvd0[12];
43230 + u8 rsvd1[31];
43231 + u8 valid;
43232 + u8 async_cq_valid[1];
43233 + u8 rsvd2[31];
43234 + u8 rsvd3[32];
43235 +} __packed;
43236 +
43237 struct be_cmd_req_mcc_create {
43238 struct be_cmd_req_hdr hdr;
43239 u16 num_pages;
43240 - u16 rsvd0;
43241 - u8 context[sizeof(struct amap_mcc_context) / 8];
43242 + u16 cq_id;
43243 + u8 context[sizeof(struct amap_mcc_context_be) / 8];
43244 + struct phys_addr pages[8];
43245 +} __packed;
43246 +
43247 +struct be_cmd_req_mcc_ext_create {
43248 + struct be_cmd_req_hdr hdr;
43249 + u16 num_pages;
43250 + u16 cq_id;
43251 + u32 async_event_bitmap[1];
43252 + u8 context[sizeof(struct amap_mcc_context_be) / 8];
43253 struct phys_addr pages[8];
43254 } __packed;
43255
43256 @@ -335,49 +469,32 @@ struct be_cmd_resp_mcc_create {
43257 } __packed;
43258
43259 /******************** Create TxQ ***************************/
43260 -#define BE_ETH_TX_RING_TYPE_STANDARD 2
43261 +#define ETX_QUEUE_TYPE_STANDARD 0x2
43262 +#define ETX_QUEUE_TYPE_PRIORITY 0x10
43263 #define BE_ULP1_NUM 1
43264
43265 -/* Pseudo amap definition in which each bit of the actual structure is defined
43266 - * as a byte: used to calculate offset/shift/mask of each field */
43267 -struct amap_tx_context {
43268 - u8 rsvd0[16]; /* dword 0 */
43269 - u8 tx_ring_size[4]; /* dword 0 */
43270 - u8 rsvd1[26]; /* dword 0 */
43271 - u8 pci_func_id[8]; /* dword 1 */
43272 - u8 rsvd2[9]; /* dword 1 */
43273 - u8 ctx_valid; /* dword 1 */
43274 - u8 cq_id_send[16]; /* dword 2 */
43275 - u8 rsvd3[16]; /* dword 2 */
43276 - u8 rsvd4[32]; /* dword 3 */
43277 - u8 rsvd5[32]; /* dword 4 */
43278 - u8 rsvd6[32]; /* dword 5 */
43279 - u8 rsvd7[32]; /* dword 6 */
43280 - u8 rsvd8[32]; /* dword 7 */
43281 - u8 rsvd9[32]; /* dword 8 */
43282 - u8 rsvd10[32]; /* dword 9 */
43283 - u8 rsvd11[32]; /* dword 10 */
43284 - u8 rsvd12[32]; /* dword 11 */
43285 - u8 rsvd13[32]; /* dword 12 */
43286 - u8 rsvd14[32]; /* dword 13 */
43287 - u8 rsvd15[32]; /* dword 14 */
43288 - u8 rsvd16[32]; /* dword 15 */
43289 -} __packed;
43290 -
43291 struct be_cmd_req_eth_tx_create {
43292 struct be_cmd_req_hdr hdr;
43293 u8 num_pages;
43294 u8 ulp_num;
43295 - u8 type;
43296 - u8 bound_port;
43297 - u8 context[sizeof(struct amap_tx_context) / 8];
43298 + u16 type;
43299 + u16 if_id;
43300 + u8 queue_size;
43301 + u8 rsvd1;
43302 + u32 rsvd2;
43303 + u16 cq_id;
43304 + u16 rsvd3;
43305 + u32 rsvd4[13];
43306 struct phys_addr pages[8];
43307 } __packed;
43308
43309 struct be_cmd_resp_eth_tx_create {
43310 struct be_cmd_resp_hdr hdr;
43311 u16 cid;
43312 - u16 rsvd0;
43313 + u16 rid;
43314 + u32 db_offset;
43315 + u8 tc_id;
43316 + u8 rsvd0[3];
43317 } __packed;
43318
43319 /******************** Create RxQ ***************************/
43320 @@ -396,7 +513,7 @@ struct be_cmd_req_eth_rx_create {
43321 struct be_cmd_resp_eth_rx_create {
43322 struct be_cmd_resp_hdr hdr;
43323 u16 id;
43324 - u8 cpu_id;
43325 + u8 rss_id;
43326 u8 rsvd0;
43327 } __packed;
43328
43329 @@ -429,14 +546,15 @@ enum be_if_flags {
43330 BE_IF_FLAGS_VLAN = 0x100,
43331 BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
43332 BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
43333 - BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800
43334 + BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
43335 + BE_IF_FLAGS_MULTICAST = 0x1000
43336 };
43337
43338 /* An RX interface is an object with one or more MAC addresses and
43339 * filtering capabilities. */
43340 struct be_cmd_req_if_create {
43341 struct be_cmd_req_hdr hdr;
43342 - u32 version; /* ignore currntly */
43343 + u32 version; /* ignore currently */
43344 u32 capability_flags;
43345 u32 enable_flags;
43346 u8 mac_addr[ETH_ALEN];
43347 @@ -458,7 +576,7 @@ struct be_cmd_req_if_destroy {
43348 };
43349
43350 /*************** HW Stats Get **********************************/
43351 -struct be_port_rxf_stats {
43352 +struct be_port_rxf_stats_v0 {
43353 u32 rx_bytes_lsd; /* dword 0*/
43354 u32 rx_bytes_msd; /* dword 1*/
43355 u32 rx_total_frames; /* dword 2*/
43356 @@ -527,8 +645,8 @@ struct be_port_rxf_stats {
43357 u32 rx_input_fifo_overflow; /* dword 65*/
43358 };
43359
43360 -struct be_rxf_stats {
43361 - struct be_port_rxf_stats port[2];
43362 +struct be_rxf_stats_v0 {
43363 + struct be_port_rxf_stats_v0 port[2];
43364 u32 rx_drops_no_pbuf; /* dword 132*/
43365 u32 rx_drops_no_txpb; /* dword 133*/
43366 u32 rx_drops_no_erx_descr; /* dword 134*/
43367 @@ -545,31 +663,51 @@ struct be_rxf_stats {
43368 u32 rx_drops_invalid_ring; /* dword 145*/
43369 u32 forwarded_packets; /* dword 146*/
43370 u32 rx_drops_mtu; /* dword 147*/
43371 - u32 rsvd0[15];
43372 + u32 rsvd0[7];
43373 + u32 port0_jabber_events;
43374 + u32 port1_jabber_events;
43375 + u32 rsvd1[6];
43376 };
43377
43378 -struct be_erx_stats {
43379 +struct be_erx_stats_v0 {
43380 u32 rx_drops_no_fragments[44]; /* dwordS 0 to 43*/
43381 - u32 debug_wdma_sent_hold; /* dword 44*/
43382 - u32 debug_wdma_pbfree_sent_hold; /* dword 45*/
43383 - u32 debug_wdma_zerobyte_pbfree_sent_hold; /* dword 46*/
43384 - u32 debug_pmem_pbuf_dealloc; /* dword 47*/
43385 + u32 rsvd[4];
43386 };
43387
43388 -struct be_hw_stats {
43389 - struct be_rxf_stats rxf;
43390 +struct be_pmem_stats {
43391 + u32 eth_red_drops;
43392 + u32 rsvd[5];
43393 +};
43394 +
43395 +struct be_hw_stats_v0 {
43396 + struct be_rxf_stats_v0 rxf;
43397 u32 rsvd[48];
43398 - struct be_erx_stats erx;
43399 + struct be_erx_stats_v0 erx;
43400 + struct be_pmem_stats pmem;
43401 };
43402
43403 -struct be_cmd_req_get_stats {
43404 +struct be_cmd_req_get_stats_v0 {
43405 struct be_cmd_req_hdr hdr;
43406 - u8 rsvd[sizeof(struct be_hw_stats)];
43407 + u8 rsvd[sizeof(struct be_hw_stats_v0)];
43408 };
43409
43410 -struct be_cmd_resp_get_stats {
43411 +struct be_cmd_resp_get_stats_v0 {
43412 struct be_cmd_resp_hdr hdr;
43413 - struct be_hw_stats hw_stats;
43414 + struct be_hw_stats_v0 hw_stats;
43415 +};
43416 +
43417 +struct be_cmd_req_get_cntl_addnl_attribs {
43418 + struct be_cmd_req_hdr hdr;
43419 + u8 rsvd[8];
43420 +};
43421 +
43422 +struct be_cmd_resp_get_cntl_addnl_attribs {
43423 + struct be_cmd_resp_hdr hdr;
43424 + u16 ipl_file_number;
43425 + u8 ipl_file_version;
43426 + u8 rsvd0;
43427 + u8 on_die_temperature; /* in degrees centigrade*/
43428 + u8 rsvd1[3];
43429 };
43430
43431 struct be_cmd_req_vlan_config {
43432 @@ -581,30 +719,22 @@ struct be_cmd_req_vlan_config {
43433 u16 normal_vlan[64];
43434 } __packed;
43435
43436 -struct be_cmd_req_promiscuous_config {
43437 - struct be_cmd_req_hdr hdr;
43438 - u8 port0_promiscuous;
43439 - u8 port1_promiscuous;
43440 - u16 rsvd0;
43441 -} __packed;
43442 -
43443 +/******************** RX FILTER ******************************/
43444 +#define BE_MAX_MC 64 /* set mcast promisc if > 64 */
43445 struct macaddr {
43446 u8 byte[ETH_ALEN];
43447 };
43448
43449 -struct be_cmd_req_mcast_mac_config {
43450 +struct be_cmd_req_rx_filter {
43451 struct be_cmd_req_hdr hdr;
43452 - u16 num_mac;
43453 - u8 promiscuous;
43454 - u8 interface_id;
43455 - struct macaddr mac[32];
43456 -} __packed;
43457 -
43458 -static inline struct be_hw_stats *
43459 -hw_stats_from_cmd(struct be_cmd_resp_get_stats *cmd)
43460 -{
43461 - return &cmd->hw_stats;
43462 -}
43463 + u32 global_flags_mask;
43464 + u32 global_flags;
43465 + u32 if_flags_mask;
43466 + u32 if_flags;
43467 + u32 if_id;
43468 + u32 mcast_num;
43469 + struct macaddr mcast_mac[BE_MAX_MC];
43470 +};
43471
43472 /******************** Link Status Query *******************/
43473 struct be_cmd_req_link_status {
43474 @@ -619,13 +749,18 @@ enum {
43475 };
43476
43477 enum {
43478 - PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
43479 + PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
43480 PHY_LINK_SPEED_10MBPS = 0x1,
43481 PHY_LINK_SPEED_100MBPS = 0x2,
43482 PHY_LINK_SPEED_1GBPS = 0x3,
43483 PHY_LINK_SPEED_10GBPS = 0x4
43484 };
43485
43486 +enum {
43487 + LINK_DOWN = 0x0,
43488 + LINK_UP = 0X1
43489 +};
43490 +
43491 struct be_cmd_resp_link_status {
43492 struct be_cmd_resp_hdr hdr;
43493 u8 physical_port;
43494 @@ -634,9 +769,47 @@ struct be_cmd_resp_link_status {
43495 u8 mac_fault;
43496 u8 mgmt_mac_duplex;
43497 u8 mgmt_mac_speed;
43498 - u16 rsvd0;
43499 + u16 link_speed;
43500 + u32 logical_link_status;
43501 } __packed;
43502
43503 +/******************** Port Identification ***************************/
43504 +/* Identifies the type of port attached to NIC */
43505 +struct be_cmd_req_port_type {
43506 + struct be_cmd_req_hdr hdr;
43507 + u32 page_num;
43508 + u32 port;
43509 +};
43510 +
43511 +enum {
43512 + TR_PAGE_A0 = 0xa0,
43513 + TR_PAGE_A2 = 0xa2
43514 +};
43515 +
43516 +struct be_cmd_resp_port_type {
43517 + struct be_cmd_resp_hdr hdr;
43518 + u32 page_num;
43519 + u32 port;
43520 + struct data {
43521 + u8 identifier;
43522 + u8 identifier_ext;
43523 + u8 connector;
43524 + u8 transceiver[8];
43525 + u8 rsvd0[3];
43526 + u8 length_km;
43527 + u8 length_hm;
43528 + u8 length_om1;
43529 + u8 length_om2;
43530 + u8 length_cu;
43531 + u8 length_cu_m;
43532 + u8 vendor_name[16];
43533 + u8 rsvd;
43534 + u8 vendor_oui[3];
43535 + u8 vendor_pn[16];
43536 + u8 vendor_rev[4];
43537 + } data;
43538 +};
43539 +
43540 /******************** Get FW Version *******************/
43541 struct be_cmd_req_get_fw_version {
43542 struct be_cmd_req_hdr hdr;
43543 @@ -686,9 +859,13 @@ struct be_cmd_resp_modify_eq_delay {
43544 } __packed;
43545
43546 /******************** Get FW Config *******************/
43547 +#define FLEX10_MODE 0x400
43548 +#define VNIC_MODE 0x20000
43549 +#define UMC_ENABLED 0x1000000
43550 +
43551 struct be_cmd_req_query_fw_cfg {
43552 struct be_cmd_req_hdr hdr;
43553 - u32 rsvd[30];
43554 + u32 rsvd[31];
43555 };
43556
43557 struct be_cmd_resp_query_fw_cfg {
43558 @@ -696,10 +873,61 @@ struct be_cmd_resp_query_fw_cfg {
43559 u32 be_config_number;
43560 u32 asic_revision;
43561 u32 phys_port;
43562 - u32 function_cap;
43563 + u32 function_mode;
43564 u32 rsvd[26];
43565 + u32 function_caps;
43566 };
43567
43568 +/******************** RSS Config *******************/
43569 +/* RSS types */
43570 +#define RSS_ENABLE_NONE 0x0
43571 +#define RSS_ENABLE_IPV4 0x1
43572 +#define RSS_ENABLE_TCP_IPV4 0x2
43573 +#define RSS_ENABLE_IPV6 0x4
43574 +#define RSS_ENABLE_TCP_IPV6 0x8
43575 +
43576 +struct be_cmd_req_rss_config {
43577 + struct be_cmd_req_hdr hdr;
43578 + u32 if_id;
43579 + u16 enable_rss;
43580 + u16 cpu_table_size_log2;
43581 + u32 hash[10];
43582 + u8 cpu_table[128];
43583 + u8 flush;
43584 + u8 rsvd0[3];
43585 +};
43586 +
43587 +/******************** Port Beacon ***************************/
43588 +
43589 +#define BEACON_STATE_ENABLED 0x1
43590 +#define BEACON_STATE_DISABLED 0x0
43591 +
43592 +struct be_cmd_req_enable_disable_beacon {
43593 + struct be_cmd_req_hdr hdr;
43594 + u8 port_num;
43595 + u8 beacon_state;
43596 + u8 beacon_duration;
43597 + u8 status_duration;
43598 +} __packed;
43599 +
43600 +struct be_cmd_resp_enable_disable_beacon {
43601 + struct be_cmd_resp_hdr resp_hdr;
43602 + u32 rsvd0;
43603 +} __packed;
43604 +
43605 +struct be_cmd_req_get_beacon_state {
43606 + struct be_cmd_req_hdr hdr;
43607 + u8 port_num;
43608 + u8 rsvd0;
43609 + u16 rsvd1;
43610 +} __packed;
43611 +
43612 +struct be_cmd_resp_get_beacon_state {
43613 + struct be_cmd_resp_hdr resp_hdr;
43614 + u8 beacon_state;
43615 + u8 rsvd0[3];
43616 +} __packed;
43617 +
43618 /****************** Firmware Flash ******************/
43619 struct flashrom_params {
43620 u32 op_code;
43621 @@ -714,17 +942,468 @@ struct be_cmd_write_flashrom {
43622 struct flashrom_params params;
43623 };
43624
43625 +/************************ WOL *******************************/
43626 +struct be_cmd_req_acpi_wol_magic_config {
43627 + struct be_cmd_req_hdr hdr;
43628 + u32 rsvd0[145];
43629 + u8 magic_mac[6];
43630 + u8 rsvd2[2];
43631 +} __packed;
43632 +
43633 +/********************** LoopBack test *********************/
43634 +struct be_cmd_req_loopback_test {
43635 + struct be_cmd_req_hdr hdr;
43636 + u32 loopback_type;
43637 + u32 num_pkts;
43638 + u64 pattern;
43639 + u32 src_port;
43640 + u32 dest_port;
43641 + u32 pkt_size;
43642 +};
43643 +
43644 +struct be_cmd_resp_loopback_test {
43645 + struct be_cmd_resp_hdr resp_hdr;
43646 + u32 status;
43647 + u32 num_txfer;
43648 + u32 num_rx;
43649 + u32 miscomp_off;
43650 + u32 ticks_compl;
43651 +};
43652 +
43653 +struct be_cmd_req_set_lmode {
43654 + struct be_cmd_req_hdr hdr;
43655 + u8 src_port;
43656 + u8 dest_port;
43657 + u8 loopback_type;
43658 + u8 loopback_state;
43659 +};
43660 +
43661 +struct be_cmd_resp_set_lmode {
43662 + struct be_cmd_resp_hdr resp_hdr;
43663 + u8 rsvd0[4];
43664 +};
43665 +
43666 +/********************** DDR DMA test *********************/
43667 +struct be_cmd_req_ddrdma_test {
43668 + struct be_cmd_req_hdr hdr;
43669 + u64 pattern;
43670 + u32 byte_count;
43671 + u32 rsvd0;
43672 + u8 snd_buff[4096];
43673 + u8 rsvd1[4096];
43674 +};
43675 +
43676 +struct be_cmd_resp_ddrdma_test {
43677 + struct be_cmd_resp_hdr hdr;
43678 + u64 pattern;
43679 + u32 byte_cnt;
43680 + u32 snd_err;
43681 + u8 rsvd0[4096];
43682 + u8 rcv_buff[4096];
43683 +};
43684 +
43685 +/*********************** SEEPROM Read ***********************/
43686 +
43687 +#define BE_READ_SEEPROM_LEN 1024
43688 +struct be_cmd_req_seeprom_read {
43689 + struct be_cmd_req_hdr hdr;
43690 + u8 rsvd0[BE_READ_SEEPROM_LEN];
43691 +};
43692 +
43693 +struct be_cmd_resp_seeprom_read {
43694 + struct be_cmd_req_hdr hdr;
43695 + u8 seeprom_data[BE_READ_SEEPROM_LEN];
43696 +};
43697 +
43698 +enum {
43699 + PHY_TYPE_CX4_10GB = 0,
43700 + PHY_TYPE_XFP_10GB,
43701 + PHY_TYPE_SFP_1GB,
43702 + PHY_TYPE_SFP_PLUS_10GB,
43703 + PHY_TYPE_KR_10GB,
43704 + PHY_TYPE_KX4_10GB,
43705 + PHY_TYPE_BASET_10GB,
43706 + PHY_TYPE_BASET_1GB,
43707 + PHY_TYPE_BASEX_1GB,
43708 + PHY_TYPE_SGMII,
43709 + PHY_TYPE_DISABLED = 255
43710 +};
43711 +
43712 +#define BE_AN_EN 0x2
43713 +#define BE_PAUSE_SYM_EN 0x80
43714 +
43715 +struct be_cmd_req_get_phy_info {
43716 + struct be_cmd_req_hdr hdr;
43717 + u8 rsvd0[24];
43718 +};
43719 +
43720 +struct be_phy_info {
43721 + u16 phy_type;
43722 + u16 interface_type;
43723 + u32 misc_params;
43724 + u16 ext_phy_details;
43725 + u16 rsvd;
43726 + u16 auto_speeds_supported;
43727 + u16 fixed_speeds_supported;
43728 + u32 future_use[2];
43729 +};
43730 +
43731 +struct be_cmd_resp_get_phy_info {
43732 + struct be_cmd_req_hdr hdr;
43733 + struct be_phy_info phy_info;
43734 +};
43735 +
43736 +/*********************** Set QOS ***********************/
43737 +
43738 +#define BE_QOS_BITS_NIC 1
43739 +
43740 +struct be_cmd_req_set_qos {
43741 + struct be_cmd_req_hdr hdr;
43742 + u32 valid_bits;
43743 + u32 max_bps_nic;
43744 + u32 rsvd[7];
43745 +};
43746 +
43747 +struct be_cmd_resp_set_qos {
43748 + struct be_cmd_resp_hdr hdr;
43749 + u32 rsvd;
43750 +};
43751 +
43752 +/*********************** Controller Attributes ***********************/
43753 +struct be_cmd_req_cntl_attribs {
43754 + struct be_cmd_req_hdr hdr;
43755 +};
43756 +
43757 +struct be_cmd_resp_cntl_attribs {
43758 + struct be_cmd_resp_hdr hdr;
43759 + struct mgmt_controller_attrib attribs;
43760 +};
43761 +
43762 +/******************* get port names ***************/
43763 +struct be_cmd_req_get_port_name {
43764 + struct be_cmd_req_hdr hdr;
43765 + u32 rsvd0;
43766 +};
43767 +
43768 +struct be_cmd_resp_get_port_name {
43769 + struct be_cmd_req_hdr hdr;
43770 + u8 port0_name;
43771 + u8 port1_name;
43772 + u8 rsvd0[2];
43773 +};
43774 +
43775 +struct be_cmd_resp_get_port_name_v1 {
43776 + struct be_cmd_req_hdr hdr;
43777 + u32 pt : 2;
43778 + u32 rsvd0 : 30;
43779 + u8 port0_name;
43780 + u8 port1_name;
43781 + u8 port2_name;
43782 + u8 port3_name;
43783 +};
43784 +
43785 +/*********************** Set driver function ***********************/
43786 +#define CAPABILITY_SW_TIMESTAMPS 2
43787 +#define CAPABILITY_BE3_NATIVE_ERX_API 4
43788 +
43789 +struct be_cmd_req_set_func_cap {
43790 + struct be_cmd_req_hdr hdr;
43791 + u32 valid_cap_flags;
43792 + u32 cap_flags;
43793 + u8 rsvd[212];
43794 +};
43795 +
43796 +struct be_cmd_resp_set_func_cap {
43797 + struct be_cmd_resp_hdr hdr;
43798 + u32 valid_cap_flags;
43799 + u32 cap_flags;
43800 + u8 rsvd[212];
43801 +};
43802 +
43803 +/*********************** PG Query Request ****************************/
43804 +#define REQ_PG_QUERY 0x1
43805 +#define REQ_PG_FEAT 0x1
43806 +struct be_cmd_req_pg {
43807 + struct be_cmd_req_hdr hdr;
43808 + u32 query;
43809 + u32 pfc_pg;
43810 +};
43811 +
43812 +struct be_cmd_resp_pg {
43813 + struct be_cmd_resp_hdr hdr;
43814 + u32 pfc_pg;
43815 + u32 num_tx_rings;
43816 +};
43817 +
43818 +/*********************** Function Privileges ***********************/
43819 +enum {
43820 + BE_PRIV_DEFAULT = 0x1,
43821 + BE_PRIV_LNKQUERY = 0x2,
43822 + BE_PRIV_LNKSTATS = 0x4,
43823 + BE_PRIV_LNKMGMT = 0x8,
43824 + BE_PRIV_LNKDIAG = 0x10,
43825 + BE_PRIV_UTILQUERY = 0x20,
43826 + BE_PRIV_FILTMGMT = 0x40,
43827 + BE_PRIV_IFACEMGMT = 0x80,
43828 + BE_PRIV_VHADM = 0x100,
43829 + BE_PRIV_DEVCFG = 0x200,
43830 + BE_PRIV_DEVSEC = 0x400
43831 +};
43832 +
43833 +struct be_cmd_req_get_fn_privileges {
43834 + struct be_cmd_req_hdr hdr;
43835 + u32 rsvd;
43836 +};
43837 +
43838 +struct be_cmd_resp_get_fn_privileges {
43839 + struct be_cmd_resp_hdr hdr;
43840 + u32 privilege_mask;
43841 +};
43842 +
43843 +struct be_cmd_req_set_fn_privileges {
43844 + struct be_cmd_req_hdr hdr;
43845 + u32 privilege_mask;
43846 +};
43847 +
43848 +struct be_cmd_resp_set_fn_privileges {
43849 + struct be_cmd_resp_hdr hdr;
43850 + u32 prev_privilege_mask;
43851 +};
43852 +
43853 +/*********************** HSW Config ***********************/
43854 +struct amap_set_hsw_context {
43855 + u8 interface_id[16];
43856 + u8 rsvd0[14];
43857 + u8 pvid_valid;
43858 + u8 rsvd1;
43859 + u8 rsvd2[16];
43860 + u8 pvid[16];
43861 + u8 rsvd3[32];
43862 + u8 rsvd4[32];
43863 + u8 rsvd5[32];
43864 +} __packed;
43865 +
43866 +struct be_cmd_req_set_hsw_config {
43867 + struct be_cmd_req_hdr hdr;
43868 + u8 context[sizeof(struct amap_set_hsw_context) / 8];
43869 +} __packed;
43870 +
43871 +struct be_cmd_resp_set_hsw_config {
43872 + struct be_cmd_resp_hdr hdr;
43873 + u32 rsvd;
43874 +};
43875 +
43876 +struct amap_get_hsw_req_context {
43877 + u8 interface_id[16];
43878 + u8 rsvd0[14];
43879 + u8 pvid_valid;
43880 + u8 pport;
43881 +} __packed;
43882 +
43883 +struct amap_get_hsw_resp_context {
43884 + u8 rsvd1[16];
43885 + u8 pvid[16];
43886 + u8 rsvd2[32];
43887 + u8 rsvd3[32];
43888 + u8 rsvd4[32];
43889 +} __packed;
43890 +
43891 +struct be_cmd_req_get_hsw_config {
43892 + struct be_cmd_req_hdr hdr;
43893 + u8 context[sizeof(struct amap_get_hsw_req_context) / 8];
43894 +} __packed;
43895 +
43896 +struct be_cmd_resp_get_hsw_config {
43897 + struct be_cmd_resp_hdr hdr;
43898 + u8 context[sizeof(struct amap_get_hsw_resp_context) / 8];
43899 + u32 rsvd;
43900 +};
43901 +
43902 +/*************** Set speed ********************/
43903 +struct be_cmd_req_set_port_speed_v1 {
43904 + struct be_cmd_req_hdr hdr;
43905 + u8 port_num;
43906 + u8 virt_port;
43907 + u16 mac_speed;
43908 + u16 dac_cable_length;
43909 + u16 rsvd0;
43910 +};
43911 +
43912 +struct be_cmd_resp_set_port_speed_v1 {
43913 + struct be_cmd_resp_hdr hdr;
43914 + u32 rsvd0;
43915 +};
43916 +
43917 +/************** get port speed *******************/
43918 +struct be_cmd_req_get_port_speed {
43919 + struct be_cmd_req_hdr hdr;
43920 + u8 port_num;
43921 +};
43922 +
43923 +struct be_cmd_resp_get_port_speed {
43924 + struct be_cmd_req_hdr hdr;
43925 + u16 mac_speed;
43926 + u16 dac_cable_length;
43927 +};
43928 +
43929 +/*************** HW Stats Get v1 **********************************/
43930 +#define BE_TXP_SW_SZ 48
43931 +struct be_port_rxf_stats_v1 {
43932 + u32 rsvd0[12];
43933 + u32 rx_crc_errors;
43934 + u32 rx_alignment_symbol_errors;
43935 + u32 rx_pause_frames;
43936 + u32 rx_priority_pause_frames;
43937 + u32 rx_control_frames;
43938 + u32 rx_in_range_errors;
43939 + u32 rx_out_range_errors;
43940 + u32 rx_frame_too_long;
43941 + u32 rx_address_match_errors;
43942 + u32 rx_dropped_too_small;
43943 + u32 rx_dropped_too_short;
43944 + u32 rx_dropped_header_too_small;
43945 + u32 rx_dropped_tcp_length;
43946 + u32 rx_dropped_runt;
43947 + u32 rsvd1[10];
43948 + u32 rx_ip_checksum_errs;
43949 + u32 rx_tcp_checksum_errs;
43950 + u32 rx_udp_checksum_errs;
43951 + u32 rsvd2[7];
43952 + u32 rx_switched_unicast_packets;
43953 + u32 rx_switched_multicast_packets;
43954 + u32 rx_switched_broadcast_packets;
43955 + u32 rsvd3[3];
43956 + u32 tx_pauseframes;
43957 + u32 tx_priority_pauseframes;
43958 + u32 tx_controlframes;
43959 + u32 rsvd4[10];
43960 + u32 rxpp_fifo_overflow_drop;
43961 + u32 rx_input_fifo_overflow_drop;
43962 + u32 pmem_fifo_overflow_drop;
43963 + u32 jabber_events;
43964 + u32 rsvd5[3];
43965 +};
43966 +
43967 +
43968 +struct be_rxf_stats_v1 {
43969 + struct be_port_rxf_stats_v1 port[4];
43970 + u32 rsvd0[2];
43971 + u32 rx_drops_no_pbuf;
43972 + u32 rx_drops_no_txpb;
43973 + u32 rx_drops_no_erx_descr;
43974 + u32 rx_drops_no_tpre_descr;
43975 + u32 rsvd1[6];
43976 + u32 rx_drops_too_many_frags;
43977 + u32 rx_drops_invalid_ring;
43978 + u32 forwarded_packets;
43979 + u32 rx_drops_mtu;
43980 + u32 rsvd2[14];
43981 +};
43982 +
43983 +struct be_erx_stats_v1 {
43984 + u32 rx_drops_no_fragments[68]; /* dwordS 0 to 67*/
43985 + u32 rsvd[4];
43986 +};
43987 +
43988 +struct be_hw_stats_v1 {
43989 + struct be_rxf_stats_v1 rxf;
43990 + u32 rsvd0[BE_TXP_SW_SZ];
43991 + struct be_erx_stats_v1 erx;
43992 + struct be_pmem_stats pmem;
43993 + u32 rsvd1[3];
43994 +};
43995 +
43996 +struct be_cmd_req_get_stats_v1 {
43997 + struct be_cmd_req_hdr hdr;
43998 + u8 rsvd[sizeof(struct be_hw_stats_v1)];
43999 +};
44000 +
44001 +struct be_cmd_resp_get_stats_v1 {
44002 + struct be_cmd_resp_hdr hdr;
44003 + struct be_hw_stats_v1 hw_stats;
44004 +};
44005 +
44006 +static inline void *
44007 +hw_stats_from_cmd(struct be_adapter *adapter)
44008 +{
44009 + if (adapter->generation == BE_GEN3) {
44010 + struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
44011 +
44012 + return &cmd->hw_stats;
44013 + } else {
44014 + struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
44015 +
44016 + return &cmd->hw_stats;
44017 + }
44018 +}
44019 +
44020 +static inline void *be_port_rxf_stats_from_cmd(struct be_adapter *adapter)
44021 +{
44022 + if (adapter->generation == BE_GEN3) {
44023 + struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
44024 + struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
44025 +
44026 + return &rxf_stats->port[adapter->port_num];
44027 + } else {
44028 + struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
44029 + struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
44030 +
44031 + return &rxf_stats->port[adapter->port_num];
44032 + }
44033 +}
44034 +
44035 +static inline void *be_rxf_stats_from_cmd(struct be_adapter *adapter)
44036 +{
44037 + if (adapter->generation == BE_GEN3) {
44038 + struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
44039 +
44040 + return &hw_stats->rxf;
44041 + } else {
44042 + struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
44043 +
44044 + return &hw_stats->rxf;
44045 + }
44046 +}
44047 +
44048 +static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
44049 +{
44050 + if (adapter->generation == BE_GEN3) {
44051 + struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
44052 +
44053 + return &hw_stats->erx;
44054 + } else {
44055 + struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
44056 +
44057 + return &hw_stats->erx;
44058 + }
44059 +}
44060 +
44061 +static inline void *be_pmem_stats_from_cmd(struct be_adapter *adapter)
44062 +{
44063 + if (adapter->generation == BE_GEN3) {
44064 + struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
44065 +
44066 + return &hw_stats->pmem;
44067 + } else {
44068 + struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
44069 +
44070 + return &hw_stats->pmem;
44071 + }
44072 +}
44073 +
44074 extern int be_pci_fnum_get(struct be_adapter *adapter);
44075 extern int be_cmd_POST(struct be_adapter *adapter);
44076 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
44077 u8 type, bool permanent, u32 if_handle);
44078 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
44079 - u32 if_id, u32 *pmac_id);
44080 -extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
44081 + u32 if_id, u32 *pmac_id, u32 domain);
44082 +extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id,
44083 + u32 domain);
44084 extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
44085 u32 en_flags, u8 *mac, bool pmac_invalid,
44086 - u32 *if_handle, u32 *pmac_id);
44087 -extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
44088 + u32 *if_handle, u32 *pmac_id, u32 domain);
44089 +extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
44090 + u32 domain);
44091 extern int be_cmd_eq_create(struct be_adapter *adapter,
44092 struct be_queue_info *eq, int eq_delay);
44093 extern int be_cmd_cq_create(struct be_adapter *adapter,
44094 @@ -736,36 +1415,92 @@ extern int be_cmd_mccq_create(struct be_adapter *adapter,
44095 struct be_queue_info *cq);
44096 extern int be_cmd_txq_create(struct be_adapter *adapter,
44097 struct be_queue_info *txq,
44098 - struct be_queue_info *cq);
44099 + struct be_queue_info *cq, u8 *tc_id);
44100 extern int be_cmd_rxq_create(struct be_adapter *adapter,
44101 struct be_queue_info *rxq, u16 cq_id,
44102 u16 frag_size, u16 max_frame_size, u32 if_id,
44103 - u32 rss);
44104 + u32 rss, u8 *rss_id);
44105 extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
44106 int type);
44107 +extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
44108 + struct be_queue_info *q);
44109 extern int be_cmd_link_status_query(struct be_adapter *adapter,
44110 - bool *link_up);
44111 + int *link_status, u8 *mac_speed, u16 *link_speed, u32 dom);
44112 extern int be_cmd_reset(struct be_adapter *adapter);
44113 extern int be_cmd_get_stats(struct be_adapter *adapter,
44114 struct be_dma_mem *nonemb_cmd);
44115 -extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver);
44116 +extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
44117 + char *fw_on_flash);
44118
44119 extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
44120 extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
44121 u16 *vtag_array, u32 num, bool untagged,
44122 bool promiscuous);
44123 -extern int be_cmd_promiscuous_config(struct be_adapter *adapter,
44124 - u8 port_num, bool en);
44125 -extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
44126 - struct dev_mc_list *mc_list, u32 mc_count);
44127 +extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
44128 extern int be_cmd_set_flow_control(struct be_adapter *adapter,
44129 u32 tx_fc, u32 rx_fc);
44130 extern int be_cmd_get_flow_control(struct be_adapter *adapter,
44131 u32 *tx_fc, u32 *rx_fc);
44132 -extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
44133 - u32 *port_num, u32 *cap);
44134 +extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
44135 + u32 *function_mode, u32 *functions_caps);
44136 extern int be_cmd_reset_function(struct be_adapter *adapter);
44137 -extern int be_process_mcc(struct be_adapter *adapter);
44138 +extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
44139 + u16 table_size);
44140 +extern int be_process_mcc(struct be_adapter *adapter, int *status);
44141 +extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
44142 + u8 port_num, u8 beacon, u8 status, u8 state);
44143 +extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
44144 + u8 port_num, u32 *state);
44145 +extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
44146 + u8 *connector);
44147 extern int be_cmd_write_flashrom(struct be_adapter *adapter,
44148 struct be_dma_mem *cmd, u32 flash_oper,
44149 u32 flash_opcode, u32 buf_size);
44150 +int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
44151 + int offset);
44152 +extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
44153 + struct be_dma_mem *nonemb_cmd);
44154 +extern int be_cmd_fw_init(struct be_adapter *adapter);
44155 +extern int be_cmd_fw_clean(struct be_adapter *adapter);
44156 +extern void be_async_mcc_enable(struct be_adapter *adapter);
44157 +extern void be_async_mcc_disable(struct be_adapter *adapter);
44158 +extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
44159 + u32 loopback_type, u32 pkt_size,
44160 + u32 num_pkts, u64 pattern);
44161 +extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
44162 + u32 byte_cnt, struct be_dma_mem *cmd);
44163 +extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
44164 + struct be_dma_mem *nonemb_cmd);
44165 +extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
44166 + u8 loopback_type, u8 enable);
44167 +extern int be_cmd_get_phy_info(struct be_adapter *adapter,
44168 + struct be_phy_info *phy_info);
44169 +extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
44170 +extern void be_detect_dump_ue(struct be_adapter *adapter);
44171 +extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
44172 +extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
44173 +extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
44174 +extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
44175 +extern int be_cmd_req_native_mode(struct be_adapter *adapter);
44176 +extern int be_cmd_query_port_names_v0(struct be_adapter *adapter, u8 *port_name);
44177 +extern int be_cmd_query_port_names_v1(struct be_adapter *adapter, u8 *port_name);
44178 +extern int be_cmd_req_pg_pfc(struct be_adapter *adapter, int *fw_num_txqs);
44179 +
44180 +extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
44181 + u32 *privilege, u32 domain);
44182 +extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
44183 + u32 mask, u32 *prev, u32 domain);
44184 +extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
44185 + u32 domain, u16 intf_id);
44186 +extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
44187 + u32 domain, u16 intf_id);
44188 +extern int be_cmd_set_port_speed_v1(struct be_adapter *adapter, u8 port_num,
44189 + u16 mac_speed, u16 dac_cable_len);
44190 +extern int be_cmd_get_port_speed(struct be_adapter *adapter, u8 port_num,
44191 + u16 *dac_cable_len, u16 *port_speed);
44192 +#ifdef CONFIG_PALAU
44193 +int be_cmd_pass_ext_ioctl(struct be_adapter *adapter, dma_addr_t dma,
44194 + int req_size, void *va);
44195 +#endif
44196 +
44197 +#endif /* !BE_CMDS_H */
44198 diff --git a/drivers/net/benet/be_compat.c b/drivers/net/benet/be_compat.c
44199 new file mode 100644
44200 index 0000000..bdd1dba
44201 --- /dev/null
44202 +++ b/drivers/net/benet/be_compat.c
44203 @@ -0,0 +1,630 @@
44204 +/*
44205 + * Copyright (C) 2005 - 2011 Emulex
44206 + * All rights reserved.
44207 + *
44208 + * This program is free software; you can redistribute it and/or
44209 + * modify it under the terms of the GNU General Public License version 2
44210 + * as published by the Free Software Foundation. The full GNU General
44211 + * Public License is included in this distribution in the file called COPYING.
44212 + *
44213 + * Contact Information:
44214 + * linux-drivers@emulex.com
44215 + *
44216 + * Emulex
44217 + * 3333 Susan Street
44218 + * Costa Mesa, CA 92626
44219 + */
44220 +
44221 +#include "be.h"
44222 +
44223 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
44224 +void be_netdev_ops_init(struct net_device *netdev, struct net_device_ops *ops)
44225 +{
44226 + netdev->open = ops->ndo_open;
44227 + netdev->stop = ops->ndo_stop;
44228 + netdev->hard_start_xmit = ops->ndo_start_xmit;
44229 + netdev->set_mac_address = ops->ndo_set_mac_address;
44230 + netdev->get_stats = ops->ndo_get_stats;
44231 + netdev->set_multicast_list = ops->ndo_set_rx_mode;
44232 + netdev->change_mtu = ops->ndo_change_mtu;
44233 + netdev->vlan_rx_register = ops->ndo_vlan_rx_register;
44234 + netdev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
44235 + netdev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
44236 + netdev->do_ioctl = ops->ndo_do_ioctl;
44237 +#ifdef CONFIG_NET_POLL_CONTROLLER
44238 + netdev->poll_controller = ops->ndo_poll_controller;
44239 +#endif
44240 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
44241 + netdev->select_queue = ops->ndo_select_queue;
44242 +#endif
44243 +}
44244 +#endif
44245 +
44246 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
44247 +int eth_validate_addr(struct net_device *netdev)
44248 +{
44249 + return 0;
44250 +}
44251 +#endif
44252 +
44253 +/* New NAPI backport */
44254 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 24)
44255 +
44256 +int be_poll_compat(struct net_device *netdev, int *budget)
44257 +{
44258 + struct napi_struct *napi = netdev->priv;
44259 + u32 work_done, can_do;
44260 +
44261 + can_do = min(*budget, netdev->quota);
44262 + work_done = napi->poll(napi, can_do);
44263 +
44264 + *budget -= work_done;
44265 + netdev->quota -= work_done;
44266 + if (napi->rx)
44267 + return (work_done >= can_do);
44268 + return 0;
44269 +}
44270 +
44271 +
44272 +#endif /* New NAPI backport */
44273 +
44274 +int be_netif_napi_add(struct net_device *netdev,
44275 + struct napi_struct *napi,
44276 + int (*poll) (struct napi_struct *, int), int weight)
44277 +{
44278 +#ifdef HAVE_SIMULATED_MULTI_NAPI
44279 + struct be_adapter *adapter = netdev_priv(netdev);
44280 + struct net_device *nd;
44281 +
44282 + nd = alloc_netdev(0, "", ether_setup);
44283 + if (!nd)
44284 + return -ENOMEM;
44285 + nd->priv = napi;
44286 + nd->weight = BE_NAPI_WEIGHT;
44287 + nd->poll = be_poll_compat;
44288 + set_bit(__LINK_STATE_START, &nd->state);
44289 +
44290 + if (napi == &adapter->rx_obj[0].rx_eq.napi)
44291 + napi->rx = true;
44292 + napi->poll = poll;
44293 + napi->dev = nd;
44294 +#ifdef RHEL_NEW_NAPI
44295 + napi->napi.dev = netdev;
44296 +#endif
44297 + return 0;
44298 +#else
44299 + netif_napi_add(netdev, napi, poll, weight);
44300 + return 0;
44301 +#endif
44302 +}
44303 +void be_netif_napi_del(struct net_device *netdev)
44304 +{
44305 +#ifdef HAVE_SIMULATED_MULTI_NAPI
44306 + struct be_adapter *adapter = netdev_priv(netdev);
44307 + struct napi_struct *napi;
44308 + struct be_rx_obj *rxo;
44309 + int i;
44310 +
44311 + for_all_rx_queues(adapter, rxo, i) {
44312 + napi = &rxo->rx_eq.napi;
44313 + if (napi->dev) {
44314 + free_netdev(napi->dev);
44315 + napi->dev = NULL;
44316 + }
44317 + }
44318 +
44319 + napi = &adapter->tx_eq.napi;
44320 + if (napi->dev) {
44321 + free_netdev(napi->dev);
44322 + napi->dev = NULL;
44323 + }
44324 +#endif
44325 +}
44326 +/* INET_LRO backport */
44327 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
44328 +
44329 +#define TCP_HDR_LEN(tcph) (tcph->doff << 2)
44330 +#define IP_HDR_LEN(iph) (iph->ihl << 2)
44331 +#define TCP_PAYLOAD_LENGTH(iph, tcph) (ntohs(iph->tot_len) - IP_HDR_LEN(iph) \
44332 + - TCP_HDR_LEN(tcph))
44333 +
44334 +#define IPH_LEN_WO_OPTIONS 5
44335 +#define TCPH_LEN_WO_OPTIONS 5
44336 +#define TCPH_LEN_W_TIMESTAMP 8
44337 +
44338 +#define LRO_MAX_PG_HLEN 64
44339 +#define LRO_INC_STATS(lro_mgr, attr) { lro_mgr->stats.attr++; }
44340 +/*
44341 + * Basic tcp checks whether packet is suitable for LRO
44342 + */
44343 +static int lro_tcp_ip_check(struct iphdr *iph, struct tcphdr *tcph,
44344 + int len, struct net_lro_desc *lro_desc)
44345 +{
44346 + /* check ip header: don't aggregate padded frames */
44347 + if (ntohs(iph->tot_len) != len)
44348 + return -1;
44349 +
44350 + if (iph->ihl != IPH_LEN_WO_OPTIONS)
44351 + return -1;
44352 +
44353 + if (tcph->cwr || tcph->ece || tcph->urg || !tcph->ack
44354 + || tcph->rst || tcph->syn || tcph->fin)
44355 + return -1;
44356 +
44357 + if (INET_ECN_is_ce(ipv4_get_dsfield(iph)))
44358 + return -1;
44359 +
44360 + if (tcph->doff != TCPH_LEN_WO_OPTIONS
44361 + && tcph->doff != TCPH_LEN_W_TIMESTAMP)
44362 + return -1;
44363 +
44364 + /* check tcp options (only timestamp allowed) */
44365 + if (tcph->doff == TCPH_LEN_W_TIMESTAMP) {
44366 + u32 *topt = (u32 *)(tcph + 1);
44367 +
44368 + if (*topt != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
44369 + | (TCPOPT_TIMESTAMP << 8)
44370 + | TCPOLEN_TIMESTAMP))
44371 + return -1;
44372 +
44373 + /* timestamp should be in right order */
44374 + topt++;
44375 + if (lro_desc && after(ntohl(lro_desc->tcp_rcv_tsval),
44376 + ntohl(*topt)))
44377 + return -1;
44378 +
44379 + /* timestamp reply should not be zero */
44380 + topt++;
44381 + if (*topt == 0)
44382 + return -1;
44383 + }
44384 +
44385 + return 0;
44386 +}
44387 +
44388 +static void lro_update_tcp_ip_header(struct net_lro_desc *lro_desc)
44389 +{
44390 + struct iphdr *iph = lro_desc->iph;
44391 + struct tcphdr *tcph = lro_desc->tcph;
44392 + u32 *p;
44393 + __wsum tcp_hdr_csum;
44394 +
44395 + tcph->ack_seq = lro_desc->tcp_ack;
44396 + tcph->window = lro_desc->tcp_window;
44397 +
44398 + if (lro_desc->tcp_saw_tstamp) {
44399 + p = (u32 *)(tcph + 1);
44400 + *(p+2) = lro_desc->tcp_rcv_tsecr;
44401 + }
44402 +
44403 + iph->tot_len = htons(lro_desc->ip_tot_len);
44404 +
44405 + iph->check = 0;
44406 + iph->check = ip_fast_csum((u8 *)lro_desc->iph, iph->ihl);
44407 +
44408 + tcph->check = 0;
44409 + tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), 0);
44410 + lro_desc->data_csum = csum_add(lro_desc->data_csum, tcp_hdr_csum);
44411 + tcph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
44412 + lro_desc->ip_tot_len -
44413 + IP_HDR_LEN(iph), IPPROTO_TCP,
44414 + lro_desc->data_csum);
44415 +}
44416 +
44417 +static __wsum lro_tcp_data_csum(struct iphdr *iph, struct tcphdr *tcph, int len)
44418 +{
44419 + __wsum tcp_csum;
44420 + __wsum tcp_hdr_csum;
44421 + __wsum tcp_ps_hdr_csum;
44422 +
44423 + tcp_csum = ~csum_unfold(tcph->check);
44424 + tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), tcp_csum);
44425 +
44426 + tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
44427 + len + TCP_HDR_LEN(tcph),
44428 + IPPROTO_TCP, 0);
44429 +
44430 + return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
44431 + tcp_ps_hdr_csum);
44432 +}
44433 +
44434 +static void lro_init_desc(struct net_lro_desc *lro_desc, struct sk_buff *skb,
44435 + struct iphdr *iph, struct tcphdr *tcph,
44436 + u16 vlan_tag, struct vlan_group *vgrp)
44437 +{
44438 + int nr_frags;
44439 + u32 *ptr;
44440 + u32 tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
44441 +
44442 + nr_frags = skb_shinfo(skb)->nr_frags;
44443 + lro_desc->parent = skb;
44444 + lro_desc->next_frag = &(skb_shinfo(skb)->frags[nr_frags]);
44445 + lro_desc->iph = iph;
44446 + lro_desc->tcph = tcph;
44447 + lro_desc->tcp_next_seq = ntohl(tcph->seq) + tcp_data_len;
44448 + lro_desc->tcp_ack = ntohl(tcph->ack_seq);
44449 + lro_desc->tcp_window = tcph->window;
44450 +
44451 + lro_desc->pkt_aggr_cnt = 1;
44452 + lro_desc->ip_tot_len = ntohs(iph->tot_len);
44453 +
44454 + if (tcph->doff == 8) {
44455 + ptr = (u32 *)(tcph+1);
44456 + lro_desc->tcp_saw_tstamp = 1;
44457 + lro_desc->tcp_rcv_tsval = *(ptr+1);
44458 + lro_desc->tcp_rcv_tsecr = *(ptr+2);
44459 + }
44460 +
44461 + lro_desc->mss = tcp_data_len;
44462 + lro_desc->vgrp = vgrp;
44463 + lro_desc->vlan_tag = vlan_tag;
44464 + lro_desc->active = 1;
44465 +
44466 + if (tcp_data_len)
44467 + lro_desc->data_csum = lro_tcp_data_csum(iph, tcph,
44468 + tcp_data_len);
44469 +
44470 + if (!tcp_data_len)
44471 + lro_desc->ack_cnt++;
44472 +}
44473 +
44474 +static inline void lro_clear_desc(struct net_lro_desc *lro_desc)
44475 +{
44476 + memset(lro_desc, 0, sizeof(struct net_lro_desc));
44477 +}
44478 +
44479 +static void lro_add_common(struct net_lro_desc *lro_desc, struct iphdr *iph,
44480 + struct tcphdr *tcph, int tcp_data_len)
44481 +{
44482 + struct sk_buff *parent = lro_desc->parent;
44483 + u32 *topt;
44484 +
44485 + lro_desc->pkt_aggr_cnt++;
44486 + lro_desc->ip_tot_len += tcp_data_len;
44487 + lro_desc->tcp_next_seq += tcp_data_len;
44488 + lro_desc->tcp_window = tcph->window;
44489 + lro_desc->tcp_ack = tcph->ack_seq;
44490 +
44491 + /* don't update tcp_rcv_tsval, would not work with PAWS */
44492 + if (lro_desc->tcp_saw_tstamp) {
44493 + topt = (u32 *) (tcph + 1);
44494 + lro_desc->tcp_rcv_tsecr = *(topt + 2);
44495 + }
44496 +
44497 + if (tcp_data_len)
44498 + lro_desc->data_csum = csum_block_add(lro_desc->data_csum,
44499 + lro_tcp_data_csum(iph, tcph,
44500 + tcp_data_len),
44501 + parent->len);
44502 +
44503 + parent->len += tcp_data_len;
44504 + parent->data_len += tcp_data_len;
44505 + if (tcp_data_len > lro_desc->mss)
44506 + lro_desc->mss = tcp_data_len;
44507 +}
44508 +
44509 +static void lro_add_frags(struct net_lro_desc *lro_desc,
44510 + int len, int hlen, int truesize,
44511 + struct skb_frag_struct *skb_frags,
44512 + struct iphdr *iph, struct tcphdr *tcph)
44513 +{
44514 + struct sk_buff *skb = lro_desc->parent;
44515 + int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
44516 +
44517 + lro_add_common(lro_desc, iph, tcph, tcp_data_len);
44518 +
44519 + skb->truesize += truesize;
44520 +
44521 + if (!tcp_data_len) {
44522 + put_page(skb_frags[0].page);
44523 + lro_desc->ack_cnt++;
44524 + return;
44525 + }
44526 +
44527 + skb_frags[0].page_offset += hlen;
44528 + skb_frags[0].size -= hlen;
44529 +
44530 + while (tcp_data_len > 0) {
44531 + *(lro_desc->next_frag) = *skb_frags;
44532 + tcp_data_len -= skb_frags->size;
44533 + lro_desc->next_frag++;
44534 + skb_frags++;
44535 + skb_shinfo(skb)->nr_frags++;
44536 + }
44537 +}
44538 +
44539 +static int lro_check_tcp_conn(struct net_lro_desc *lro_desc,
44540 + struct iphdr *iph,
44541 + struct tcphdr *tcph)
44542 +{
44543 + if ((lro_desc->iph->saddr != iph->saddr)
44544 + || (lro_desc->iph->daddr != iph->daddr)
44545 + || (lro_desc->tcph->source != tcph->source)
44546 + || (lro_desc->tcph->dest != tcph->dest))
44547 + return -1;
44548 + return 0;
44549 +}
44550 +
44551 +static struct net_lro_desc *lro_get_desc(struct net_lro_mgr *lro_mgr,
44552 + struct net_lro_desc *lro_arr,
44553 + struct iphdr *iph,
44554 + struct tcphdr *tcph)
44555 +{
44556 + struct net_lro_desc *lro_desc = NULL;
44557 + struct net_lro_desc *tmp;
44558 + int max_desc = lro_mgr->max_desc;
44559 + int i;
44560 +
44561 + for (i = 0; i < max_desc; i++) {
44562 + tmp = &lro_arr[i];
44563 + if (tmp->active)
44564 + if (!lro_check_tcp_conn(tmp, iph, tcph)) {
44565 + lro_desc = tmp;
44566 + goto out;
44567 + }
44568 + }
44569 +
44570 + for (i = 0; i < max_desc; i++) {
44571 + if (!lro_arr[i].active) {
44572 + lro_desc = &lro_arr[i];
44573 + goto out;
44574 + }
44575 + }
44576 +
44577 + LRO_INC_STATS(lro_mgr, no_desc);
44578 +out:
44579 + return lro_desc;
44580 +}
44581 +
44582 +static void lro_flush(struct net_lro_mgr *lro_mgr,
44583 + struct net_lro_desc *lro_desc)
44584 +{
44585 + struct be_adapter *adapter = netdev_priv(lro_mgr->dev);
44586 +
44587 + if (lro_desc->pkt_aggr_cnt > 1)
44588 + lro_update_tcp_ip_header(lro_desc);
44589 +
44590 + skb_shinfo(lro_desc->parent)->gso_size = lro_desc->mss;
44591 +
44592 + if (lro_desc->vgrp) {
44593 + if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44594 + vlan_hwaccel_receive_skb(lro_desc->parent,
44595 + lro_desc->vgrp,
44596 + lro_desc->vlan_tag);
44597 + else
44598 + vlan_hwaccel_rx(lro_desc->parent,
44599 + lro_desc->vgrp,
44600 + lro_desc->vlan_tag);
44601 +
44602 + } else {
44603 + if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44604 + netif_receive_skb(lro_desc->parent);
44605 + else
44606 + netif_rx(lro_desc->parent);
44607 + }
44608 +
44609 + LRO_INC_STATS(lro_mgr, flushed);
44610 + lro_clear_desc(lro_desc);
44611 +}
44612 +
44613 +static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
44614 + struct skb_frag_struct *frags,
44615 + int len, int true_size,
44616 + void *mac_hdr,
44617 + int hlen, __wsum sum,
44618 + u32 ip_summed)
44619 +{
44620 + struct sk_buff *skb;
44621 + struct skb_frag_struct *skb_frags;
44622 + int data_len = len;
44623 + int hdr_len = min(len, hlen);
44624 +
44625 + skb = netdev_alloc_skb(lro_mgr->dev, hlen);
44626 + if (!skb)
44627 + return NULL;
44628 +
44629 + skb->len = len;
44630 + skb->data_len = len - hdr_len;
44631 + skb->truesize += true_size;
44632 + skb->tail += hdr_len;
44633 +
44634 + memcpy(skb->data, mac_hdr, hdr_len);
44635 +
44636 + if (skb->data_len) {
44637 + skb_frags = skb_shinfo(skb)->frags;
44638 + while (data_len > 0) {
44639 + *skb_frags = *frags;
44640 + data_len -= frags->size;
44641 + skb_frags++;
44642 + frags++;
44643 + skb_shinfo(skb)->nr_frags++;
44644 + }
44645 + skb_shinfo(skb)->frags[0].page_offset += hdr_len;
44646 + skb_shinfo(skb)->frags[0].size -= hdr_len;
44647 + } else {
44648 + put_page(frags[0].page);
44649 + }
44650 +
44651 +
44652 + skb->ip_summed = ip_summed;
44653 + skb->csum = sum;
44654 + skb->protocol = eth_type_trans(skb, lro_mgr->dev);
44655 + return skb;
44656 +}
44657 +
44658 +static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
44659 + struct skb_frag_struct *frags,
44660 + int len, int true_size,
44661 + struct vlan_group *vgrp,
44662 + u16 vlan_tag, void *priv, __wsum sum)
44663 +{
44664 + struct net_lro_desc *lro_desc;
44665 + struct iphdr *iph;
44666 + struct tcphdr *tcph;
44667 + struct sk_buff *skb;
44668 + u64 flags;
44669 + void *mac_hdr;
44670 + int mac_hdr_len;
44671 + int hdr_len = LRO_MAX_PG_HLEN;
44672 + int vlan_hdr_len = 0;
44673 + u8 pad_bytes;
44674 +
44675 + if (!lro_mgr->get_frag_header
44676 + || lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
44677 + (void *)&tcph, &flags, priv)) {
44678 + mac_hdr = page_address(frags->page) + frags->page_offset;
44679 + goto out1;
44680 + }
44681 +
44682 + if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
44683 + goto out1;
44684 +
44685 + hdr_len = (int)((void *)(tcph) + TCP_HDR_LEN(tcph) - mac_hdr);
44686 + mac_hdr_len = (int)((void *)(iph) - mac_hdr);
44687 +
44688 + lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
44689 + if (!lro_desc)
44690 + goto out1;
44691 +
44692 + pad_bytes = len - (ntohs(iph->tot_len) + mac_hdr_len);
44693 + if (!TCP_PAYLOAD_LENGTH(iph, tcph) && pad_bytes) {
44694 + len -= pad_bytes; /* trim the packet */
44695 + frags[0].size -= pad_bytes;
44696 + true_size -= pad_bytes;
44697 + }
44698 +
44699 + if (!lro_desc->active) { /* start new lro session */
44700 + if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, NULL))
44701 + goto out1;
44702 +
44703 + skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
44704 + hdr_len, 0, lro_mgr->ip_summed_aggr);
44705 + if (!skb)
44706 + goto out;
44707 +
44708 + if ((skb->protocol == htons(ETH_P_8021Q))
44709 + && !test_bit(LRO_F_EXTRACT_VLAN_ID, &lro_mgr->features))
44710 + vlan_hdr_len = VLAN_HLEN;
44711 +
44712 + iph = (void *)(skb->data + vlan_hdr_len);
44713 + tcph = (void *)((u8 *)skb->data + vlan_hdr_len
44714 + + IP_HDR_LEN(iph));
44715 +
44716 + lro_init_desc(lro_desc, skb, iph, tcph, vlan_tag, vgrp);
44717 + LRO_INC_STATS(lro_mgr, aggregated);
44718 + return 0;
44719 + }
44720 +
44721 + if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
44722 + goto out2;
44723 +
44724 + if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, lro_desc))
44725 + goto out2;
44726 +
44727 + lro_add_frags(lro_desc, len, hdr_len, true_size, frags, iph, tcph);
44728 + LRO_INC_STATS(lro_mgr, aggregated);
44729 +
44730 + if ((skb_shinfo(lro_desc->parent)->nr_frags >= lro_mgr->max_aggr) ||
44731 + lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu))
44732 + lro_flush(lro_mgr, lro_desc);
44733 +
44734 + return NULL;
44735 +
44736 +out2: /* send aggregated packets to the stack */
44737 + lro_flush(lro_mgr, lro_desc);
44738 +
44739 +out1: /* Original packet has to be posted to the stack */
44740 + skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
44741 + hdr_len, sum, lro_mgr->ip_summed);
44742 +out:
44743 + return skb;
44744 +}
44745 +
44746 +void lro_receive_frags_compat(struct net_lro_mgr *lro_mgr,
44747 + struct skb_frag_struct *frags,
44748 + int len, int true_size, void *priv, __wsum sum)
44749 +{
44750 + struct sk_buff *skb;
44751 +
44752 + skb = __lro_proc_segment(lro_mgr, frags, len, true_size, NULL, 0,
44753 + priv, sum);
44754 + if (!skb)
44755 + return;
44756 +
44757 + if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44758 + netif_receive_skb(skb);
44759 + else
44760 + netif_rx(skb);
44761 +}
44762 +
44763 +void lro_vlan_hwaccel_receive_frags_compat(struct net_lro_mgr *lro_mgr,
44764 + struct skb_frag_struct *frags,
44765 + int len, int true_size,
44766 + struct vlan_group *vgrp,
44767 + u16 vlan_tag, void *priv, __wsum sum)
44768 +{
44769 + struct sk_buff *skb;
44770 +
44771 + skb = __lro_proc_segment(lro_mgr, frags, len, true_size, vgrp,
44772 + vlan_tag, priv, sum);
44773 + if (!skb)
44774 + return;
44775 +
44776 + if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44777 + vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag);
44778 + else
44779 + vlan_hwaccel_rx(skb, vgrp, vlan_tag);
44780 +}
44781 +
44782 +void lro_flush_all_compat(struct net_lro_mgr *lro_mgr)
44783 +{
44784 + int i;
44785 + struct net_lro_desc *lro_desc = lro_mgr->lro_arr;
44786 +
44787 + for (i = 0; i < lro_mgr->max_desc; i++) {
44788 + if (lro_desc[i].active)
44789 + lro_flush(lro_mgr, &lro_desc[i]);
44790 + }
44791 +}
44792 +#endif /* INET_LRO backport */
44793 +
44794 +#ifndef TX_MQ
44795 +struct net_device *alloc_etherdev_mq_compat(int sizeof_priv,
44796 + unsigned int queue_count)
44797 +{
44798 + return alloc_etherdev(sizeof_priv);
44799 +}
44800 +
44801 +void netif_wake_subqueue_compat(struct net_device *dev, u16 queue_index)
44802 +{
44803 + netif_wake_queue(dev);
44804 +}
44805 +
44806 +void netif_stop_subqueue_compat(struct net_device *dev, u16 queue_index)
44807 +{
44808 + netif_stop_queue(dev);
44809 +}
44810 +
44811 +int __netif_subqueue_stopped_compat(const struct net_device *dev,
44812 + u16 queue_index)
44813 +{
44814 + return netif_queue_stopped(dev);
44815 +}
44816 +
44817 +u16 skb_get_queue_mapping_compat(const struct sk_buff *skb)
44818 +{
44819 + return 0;
44820 +}
44821 +
44822 +void netif_set_real_num_tx_queues_compat(struct net_device *dev,
44823 + unsigned int txq)
44824 +{
44825 + return;
44826 +}
44827 +
44828 +u16 skb_tx_hash_compat(const struct net_device *dev,
44829 + const struct sk_buff *skb)
44830 +{
44831 + return 0;
44832 +}
44833 +#endif
44834 diff --git a/drivers/net/benet/be_compat.h b/drivers/net/benet/be_compat.h
44835 new file mode 100644
44836 index 0000000..8ceecc8
44837 --- /dev/null
44838 +++ b/drivers/net/benet/be_compat.h
44839 @@ -0,0 +1,621 @@
44840 +/*
44841 + * Copyright (C) 2005 - 2011 Emulex
44842 + * All rights reserved.
44843 + *
44844 + * This program is free software; you can redistribute it and/or
44845 + * modify it under the terms of the GNU General Public License version 2
44846 + * as published by the Free Software Foundation. The full GNU General
44847 + * Public License is included in this distribution in the file called COPYING.
44848 + *
44849 + * Contact Information:
44850 + * linux-drivers@emulex.com
44851 + *
44852 + * Emulex
44853 + * 3333 Susan Street
44854 + * Costa Mesa, CA 92626
44855 + */
44856 +
44857 +#ifndef BE_COMPAT_H
44858 +#define BE_COMPAT_H
44859 +
44860 +/****************** RHEL5 and SLES10 backport ***************************/
44861 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
44862 +
44863 +#ifndef upper_32_bits
44864 +#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
44865 +#endif
44866 +
44867 +#ifndef CHECKSUM_PARTIAL
44868 +#define CHECKSUM_PARTIAL CHECKSUM_HW
44869 +#define CHECKSUM_COMPLETE CHECKSUM_HW
44870 +#endif
44871 +
44872 +#if !defined(ip_hdr)
44873 +#define ip_hdr(skb) (skb->nh.iph)
44874 +#define ipv6_hdr(skb) (skb->nh.ipv6h)
44875 +#endif
44876 +
44877 +#if !defined(__packed)
44878 +#define __packed __attribute__ ((packed))
44879 +#endif
44880 +
44881 +#if !defined(RHEL_MINOR)
44882 +/* Only for RH5U1 (Maui) and SLES10 NIC driver */
44883 +enum {
44884 + false = 0,
44885 + true = 1
44886 +};
44887 +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)
44888 +/* Only for RH5U1 (Maui) NIC driver */
44889 +static inline __attribute__((const))
44890 +int __ilog2_u32(u32 n)
44891 +{
44892 + return fls(n) - 1;
44893 +}
44894 +#endif
44895 +#endif
44896 +
44897 +#define ETH_FCS_LEN 4
44898 +#define bool u8
44899 +#ifndef PTR_ALIGN
44900 +#define PTR_ALIGN(p, a) ((typeof(p)) \
44901 + ALIGN((unsigned long)(p), (a)))
44902 +#endif
44903 +#define list_first_entry(ptr, type, member) \
44904 + list_entry((ptr)->next, type, member)
44905 +
44906 +#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \
44907 + LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16)
44908 +#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] \
44909 + __devinitdata
44910 +#endif
44911 +
44912 +/* Backport of request_irq */
44913 +typedef irqreturn_t(*backport_irq_handler_t) (int, void *);
44914 +static inline int
44915 +backport_request_irq(unsigned int irq, irqreturn_t(*handler) (int, void *),
44916 + unsigned long flags, const char *dev_name, void *dev_id)
44917 +{
44918 + return request_irq(irq,
44919 + (irqreturn_t(*) (int, void *, struct pt_regs *))handler,
44920 + flags, dev_name, dev_id);
44921 +}
44922 +#define request_irq backport_request_irq
44923 +
44924 +#endif /*** RHEL5 and SLES10 backport ***/
44925 +
44926 +#if !defined(__packed)
44927 +#define __packed __attribute__ ((packed))
44928 +#endif
44929 +
44930 +/****************** SLES10 only backport ***************************/
44931 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
44932 +
44933 +#include <linux/tifm.h>
44934 +
44935 +#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))
44936 +#define IRQF_SHARED SA_SHIRQ
44937 +#define CHECKSUM_PARTIAL CHECKSUM_HW
44938 +#define CHECKSUM_COMPLETE CHECKSUM_HW
44939 +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
44940 +#define NETIF_F_IPV6_CSUM NETIF_F_IP_CSUM
44941 +#define NETIF_F_TSO6 NETIF_F_TSO
44942 +
44943 +
44944 +static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
44945 + unsigned int length)
44946 +{
44947 + /* 16 == NET_PAD_SKB */
44948 + struct sk_buff *skb;
44949 + skb = alloc_skb(length + 16, GFP_ATOMIC);
44950 + if (likely(skb != NULL)) {
44951 + skb_reserve(skb, 16);
44952 + skb->dev = dev;
44953 + }
44954 + return skb;
44955 +}
44956 +
44957 +#define PCI_SAVE_STATE(x)
44958 +
44959 +#else /* SLES10 only backport */
44960 +
44961 +#define PCI_SAVE_STATE(x) pci_save_state(x)
44962 +
44963 +#endif /* SLES10 only backport */
44964 +
44965 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31)
44966 +#define netdev_tx_t int
44967 +#endif
44968 +
44969 +#ifndef VLAN_PRIO_MASK
44970 +#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
44971 +#define VLAN_PRIO_SHIFT 13
44972 +#endif
44973 +
44974 +/*
44975 + * Backport of netdev ops struct
44976 + */
44977 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
44978 +struct net_device_ops {
44979 + int (*ndo_init)(struct net_device *dev);
44980 + void (*ndo_uninit)(struct net_device *dev);
44981 + int (*ndo_open)(struct net_device *dev);
44982 + int (*ndo_stop)(struct net_device *dev);
44983 + int (*ndo_start_xmit) (struct sk_buff *skb, struct net_device *dev);
44984 + u16 (*ndo_select_queue)(struct net_device *dev,
44985 + struct sk_buff *skb);
44986 + void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
44987 + void (*ndo_set_rx_mode)(struct net_device *dev);
44988 + void (*ndo_set_multicast_list)(struct net_device *dev);
44989 + int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
44990 + int (*ndo_validate_addr)(struct net_device *dev);
44991 + int (*ndo_do_ioctl)(struct net_device *dev,
44992 + struct ifreq *ifr, int cmd);
44993 + int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
44994 + int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
44995 + int (*ndo_neigh_setup)(struct net_device *dev,
44996 + struct neigh_parms *);
44997 + void (*ndo_tx_timeout) (struct net_device *dev);
44998 +
44999 + struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
45000 +
45001 + void (*ndo_vlan_rx_register)(struct net_device *dev,
45002 + struct vlan_group *grp);
45003 + void (*ndo_vlan_rx_add_vid)(struct net_device *dev,
45004 + unsigned short vid);
45005 + void (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
45006 + unsigned short vid);
45007 +#ifdef CONFIG_NET_POLL_CONTROLLER
45008 +#define HAVE_NETDEV_POLL
45009 + void (*ndo_poll_controller)(struct net_device *dev);
45010 +#endif
45011 +};
45012 +extern void be_netdev_ops_init(struct net_device *netdev,
45013 + struct net_device_ops *ops);
45014 +extern int eth_validate_addr(struct net_device *);
45015 +
45016 +#endif /* Netdev ops backport */
45017 +
45018 +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 29)
45019 +#undef NETIF_F_GRO
45020 +#endif
45021 +
45022 +#ifdef NO_GRO
45023 +#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 5)))
45024 +#undef NETIF_F_GRO
45025 +#endif
45026 +#endif
45027 +
45028 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
45029 +#define HAVE_ETHTOOL_FLASH
45030 +#endif
45031 +
45032 +/*
45033 + * Backport of NAPI
45034 + */
45035 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 24)
45036 +
45037 +#if defined(RHEL_MINOR) && (RHEL_MINOR > 3)
45038 +#define RHEL_NEW_NAPI
45039 +#endif
45040 +
45041 +/* We need a new struct that has some meta data beyond rhel 5.4's napi_struct
45042 + * to fix rhel5.4's half-baked new napi implementation.
45043 + * We don't want to use rhel 5.4's broken napi_complete; so
45044 + * define a new be_napi_complete that executes the logic only for Rx
45045 + */
45046 +
45047 +#ifdef RHEL_NEW_NAPI
45048 +#define napi_complete be_napi_complete
45049 +typedef struct napi_struct rhel_napi_struct;
45050 +#endif
45051 +#define napi_struct be_napi_struct
45052 +#define napi_gro_frags(napi) napi_gro_frags((rhel_napi_struct *) napi)
45053 +#define vlan_gro_frags(napi, vlan_grp, vid)\
45054 + vlan_gro_frags((rhel_napi_struct *) napi, vlan_grp, vid)
45055 +#define napi_get_frags(napi) napi_get_frags((rhel_napi_struct *) napi)
45056 +
45057 +struct napi_struct {
45058 +#ifdef RHEL_NEW_NAPI
45059 + rhel_napi_struct napi; /* must be the first member */
45060 +#endif
45061 + struct net_device *dev;
45062 + int (*poll) (struct napi_struct *napi, int budget);
45063 + bool rx;
45064 +};
45065 +
45066 +static inline void napi_complete(struct napi_struct *napi)
45067 +{
45068 +#ifdef NETIF_F_GRO
45069 + napi_gro_flush((rhel_napi_struct *)napi);
45070 +#endif
45071 + netif_rx_complete(napi->dev);
45072 +}
45073 +
45074 +static inline void napi_schedule(struct napi_struct *napi)
45075 +{
45076 + netif_rx_schedule(napi->dev);
45077 +}
45078 +
45079 +static inline void napi_enable(struct napi_struct *napi)
45080 +{
45081 + netif_poll_enable(napi->dev);
45082 +}
45083 +
45084 +static inline void napi_disable(struct napi_struct *napi)
45085 +{
45086 + netif_poll_disable(napi->dev);
45087 +}
45088 +
45089 +#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \
45090 + LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16)
45091 +static inline void vlan_group_set_device(struct vlan_group *vg,
45092 + u16 vlan_id,
45093 + struct net_device *dev)
45094 +{
45095 + struct net_device **array;
45096 + if (!vg)
45097 + return;
45098 + array = vg->vlan_devices;
45099 + array[vlan_id] = dev;
45100 +}
45101 +#endif
45102 +
45103 +#endif /* New NAPI backport */
45104 +
45105 +extern int be_netif_napi_add(struct net_device *netdev,
45106 + struct napi_struct *napi,
45107 + int (*poll) (struct napi_struct *, int), int weight);
45108 +extern void be_netif_napi_del(struct net_device *netdev);
45109 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
45110 +#define HAVE_SIMULATED_MULTI_NAPI
45111 +#endif
45112 +
45113 +/************** Backport of Delayed work queues interface ****************/
45114 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 19)
45115 +#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \
45116 + LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16)
45117 +struct delayed_work {
45118 + struct work_struct work;
45119 +};
45120 +#endif
45121 +
45122 +#define INIT_DELAYED_WORK(_work, _func) \
45123 + INIT_WORK(&(_work)->work, _func, &(_work)->work)
45124 +
45125 +static inline int backport_cancel_delayed_work_sync(struct delayed_work *work)
45126 +{
45127 + cancel_rearming_delayed_work(&work->work);
45128 + return 0;
45129 +}
45130 +#define cancel_delayed_work_sync backport_cancel_delayed_work_sync
45131 +
45132 +static inline int backport_schedule_delayed_work(struct delayed_work *work,
45133 + unsigned long delay)
45134 +{
45135 + if (unlikely(!delay))
45136 + return schedule_work(&work->work);
45137 + else
45138 + return schedule_delayed_work(&work->work, delay);
45139 +}
45140 +#define schedule_delayed_work backport_schedule_delayed_work
45141 +#endif /* backport delayed workqueue */
45142 +
45143 +
45144 +/************** Backport of INET_LRO **********************************/
45145 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
45146 +
45147 +#include <linux/inet_lro.h>
45148 +
45149 +#else
45150 +
45151 +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)
45152 +
45153 +#if defined(RHEL_MINOR) && RHEL_MINOR < 6
45154 +typedef __u16 __bitwise __sum16;
45155 +typedef __u32 __bitwise __wsum;
45156 +#endif
45157 +
45158 +#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR <= 3)) || \
45159 + (!defined(RHEL_MINOR)))
45160 +static inline __wsum csum_unfold(__sum16 n)
45161 +{
45162 + return (__force __wsum)n;
45163 +}
45164 +#endif
45165 +
45166 +#endif
45167 +
45168 +#define lro_flush_all lro_flush_all_compat
45169 +#define lro_vlan_hwaccel_receive_frags lro_vlan_hwaccel_receive_frags_compat
45170 +#define lro_receive_frags lro_receive_frags_compat
45171 +
45172 +struct net_lro_stats {
45173 + unsigned long aggregated;
45174 + unsigned long flushed;
45175 + unsigned long no_desc;
45176 +};
45177 +
45178 +struct net_lro_desc {
45179 + struct sk_buff *parent;
45180 + struct sk_buff *last_skb;
45181 + struct skb_frag_struct *next_frag;
45182 + struct iphdr *iph;
45183 + struct tcphdr *tcph;
45184 + struct vlan_group *vgrp;
45185 + __wsum data_csum;
45186 + u32 tcp_rcv_tsecr;
45187 + u32 tcp_rcv_tsval;
45188 + u32 tcp_ack;
45189 + u32 tcp_next_seq;
45190 + u32 skb_tot_frags_len;
45191 + u32 ack_cnt;
45192 + u16 ip_tot_len;
45193 + u16 tcp_saw_tstamp; /* timestamps enabled */
45194 + u16 tcp_window;
45195 + u16 vlan_tag;
45196 + int pkt_aggr_cnt; /* counts aggregated packets */
45197 + int vlan_packet;
45198 + int mss;
45199 + int active;
45200 +};
45201 +
45202 +struct net_lro_mgr {
45203 + struct net_device *dev;
45204 + struct net_lro_stats stats;
45205 +
45206 + /* LRO features */
45207 + unsigned long features;
45208 +#define LRO_F_NAPI 1 /* Pass packets to stack via NAPI */
45209 +#define LRO_F_EXTRACT_VLAN_ID 2 /* Set flag if VLAN IDs are extracted
45210 + from received packets and eth protocol
45211 + is still ETH_P_8021Q */
45212 +
45213 + u32 ip_summed; /* Set in non generated SKBs in page mode */
45214 + u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY
45215 + * or CHECKSUM_NONE */
45216 +
45217 + int max_desc; /* Max number of LRO descriptors */
45218 + int max_aggr; /* Max number of LRO packets to be aggregated */
45219 +
45220 + struct net_lro_desc *lro_arr; /* Array of LRO descriptors */
45221 +
45222 + /* Optimized driver functions
45223 + * get_skb_header: returns tcp and ip header for packet in SKB
45224 + */
45225 + int (*get_skb_header)(struct sk_buff *skb, void **ip_hdr,
45226 + void **tcpudp_hdr, u64 *hdr_flags, void *priv);
45227 +
45228 + /* hdr_flags: */
45229 +#define LRO_IPV4 1 /* ip_hdr is IPv4 header */
45230 +#define LRO_TCP 2 /* tcpudp_hdr is TCP header */
45231 +
45232 + /*
45233 + * get_frag_header: returns mac, tcp and ip header for packet in SKB
45234 + *
45235 + * @hdr_flags: Indicate what kind of LRO has to be done
45236 + * (IPv4/IPv6/TCP/UDP)
45237 + */
45238 + int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr,
45239 + void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
45240 + void *priv);
45241 +};
45242 +
45243 +extern void lro_receive_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
45244 + void *priv);
45245 +
45246 +extern void lro_vlan_hwaccel_receive_skb(struct net_lro_mgr *lro_mgr,
45247 + struct sk_buff *skb, struct vlan_group *vgrp,
45248 + u16 vlan_tag, void *priv);
45249 +
45250 +/* This functions aggregate fragments and generate SKBs do pass
45251 + * the packets to the stack.
45252 + *
45253 + * @lro_mgr: LRO manager to use
45254 + * @frags: Fragment to be processed. Must contain entire header in first
45255 + * element.
45256 + * @len: Length of received data
45257 + * @true_size: Actual size of memory the fragment is consuming
45258 + * @priv: Private data that may be used by driver functions
45259 + * (for example get_tcp_ip_hdr)
45260 + */
45261 +extern void lro_receive_frags_compat(struct net_lro_mgr *lro_mgr,
45262 + struct skb_frag_struct *frags, int len, int true_size,
45263 + void *priv, __wsum sum);
45264 +
45265 +extern void lro_vlan_hwaccel_receive_frags_compat(struct net_lro_mgr *lro_mgr,
45266 + struct skb_frag_struct *frags, int len, int true_size,
45267 + struct vlan_group *vgrp, u16 vlan_tag, void *priv,
45268 + __wsum sum);
45269 +
45270 +/* Forward all aggregated SKBs held by lro_mgr to network stack */
45271 +extern void lro_flush_all_compat(struct net_lro_mgr *lro_mgr);
45272 +
45273 +extern void lro_flush_pkt(struct net_lro_mgr *lro_mgr, struct iphdr *iph,
45274 + struct tcphdr *tcph);
45275 +#endif /* backport of inet_lro */
45276 +
45277 +#ifndef ETHTOOL_FLASH_MAX_FILENAME
45278 +#define ETHTOOL_FLASH_MAX_FILENAME 128
45279 +#endif
45280 +
45281 +#if defined(CONFIG_XEN) && !defined(NETIF_F_GRO)
45282 +#define BE_INIT_FRAGS_PER_FRAME (u32) 1
45283 +#else
45284 +#define BE_INIT_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
45285 +#endif
45286 +
45287 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
45288 +#ifdef CONFIG_PCI_IOV
45289 +#if (!(defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR == 6)))
45290 +#undef CONFIG_PCI_IOV
45291 +#endif
45292 +#endif
45293 +#endif
45294 +
45295 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
45296 +#define dev_to_node(dev) -1
45297 +#endif
45298 +
45299 +
45300 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
45301 +#if (!(defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR > 6)))
45302 +static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
45303 + unsigned int length)
45304 +{
45305 + struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);
45306 +
45307 + if (NET_IP_ALIGN && skb)
45308 + skb_reserve(skb, NET_IP_ALIGN);
45309 + return skb;
45310 +}
45311 +#endif
45312 +#endif
45313 +
45314 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
45315 +#ifndef netif_set_gso_max_size
45316 +#define netif_set_gso_max_size(netdev, size) do {} while (0)
45317 +#endif
45318 +#endif
45319 +
45320 +#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
45321 +#if defined(RHEL_MINOR) && (RHEL_MINOR <= 4)
45322 +static inline int skb_is_gso_v6(const struct sk_buff *skb)
45323 +{
45324 + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
45325 +}
45326 +#endif
45327 +#endif
45328 +
45329 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
45330 +static inline int skb_is_gso_v6(const struct sk_buff *skb)
45331 +{
45332 + return (ip_hdr(skb)->version == 6);
45333 +}
45334 +#endif
45335 +
45336 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
45337 +#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
45338 +#endif
45339 +
45340 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
45341 +#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 6)))
45342 +#define HAVE_SRIOV_CONFIG
45343 +#endif
45344 +#endif
45345 +
45346 +#ifndef NETIF_F_VLAN_SG
45347 +#define NETIF_F_VLAN_SG NETIF_F_SG
45348 +#endif
45349 +
45350 +#ifndef NETIF_F_VLAN_CSUM
45351 +#define NETIF_F_VLAN_CSUM NETIF_F_HW_CSUM
45352 +#endif
45353 +
45354 +#ifndef NETIF_F_VLAN_TSO
45355 +#define NETIF_F_VLAN_TSO NETIF_F_TSO
45356 +#endif
45357 +
45358 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
45359 +#define vlan_features features
45360 +#endif
45361 +
45362 +#ifndef DEFINE_DMA_UNMAP_ADDR
45363 +#define DEFINE_DMA_UNMAP_ADDR(bus) dma_addr_t bus
45364 +#endif
45365 +
45366 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
45367 +
45368 +#ifndef netdev_mc_count
45369 +#define netdev_mc_count(nd) (nd->mc_count)
45370 +#endif
45371 +
45372 +#ifndef netdev_hw_addr
45373 +#define netdev_hw_addr dev_mc_list
45374 +#endif
45375 +
45376 +#ifndef netdev_for_each_mc_addr
45377 +#define netdev_for_each_mc_addr(ha, nd) \
45378 + for (ha = (nd)->mc_list; ha; ha = ha->next)
45379 +#endif
45380 +
45381 +#define DMI_ADDR dmi_addr
45382 +#else
45383 +#define DMI_ADDR addr
45384 +#endif
45385 +
45386 +#ifndef VLAN_GROUP_ARRAY_LEN
45387 +#define VLAN_GROUP_ARRAY_LEN VLAN_N_VID
45388 +#endif
45389 +/**************************** Multi TXQ Support ******************************/
45390 +
45391 +/* Supported only in RHEL6 and SL11.1 (barring one execption) */
45392 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
45393 +#define MQ_TX
45394 +#endif
45395 +
45396 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
45397 +#define alloc_etherdev_mq(sz, cnt) alloc_etherdev(sz)
45398 +#define skb_get_queue_mapping(skb) 0
45399 +#define skb_tx_hash(dev, skb) 0
45400 +#define netif_set_real_num_tx_queues(dev, txq) do {} while(0)
45401 +#define netif_wake_subqueue(dev, idx) netif_wake_queue(dev)
45402 +#define netif_stop_subqueue(dev, idx) netif_stop_queue(dev)
45403 +#define __netif_subqueue_stopped(dev, idx) netif_queue_stopped(dev)
45404 +#endif /* < 2.6.27 */
45405 +
45406 +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && \
45407 + (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)))
45408 +#define skb_tx_hash(dev, skb) 0
45409 +#define netif_set_real_num_tx_queues(dev, txq) do {} while(0)
45410 +#endif
45411 +
45412 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
45413 +#define netif_set_real_num_tx_queues be_set_real_num_tx_queues
45414 +static inline void be_set_real_num_tx_queues(struct net_device *dev,
45415 + unsigned int txq)
45416 +{
45417 + dev->real_num_tx_queues = txq;
45418 +}
45419 +#endif
45420 +
45421 +#include <linux/if_vlan.h>
45422 +static inline void be_reset_skb_tx_vlan(struct sk_buff *skb)
45423 +{
45424 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
45425 + skb->vlan_tci = 0;
45426 +#else
45427 + struct vlan_skb_tx_cookie *cookie;
45428 +
45429 + cookie = VLAN_TX_SKB_CB(skb);
45430 + cookie->magic = 0;
45431 +#endif
45432 +}
45433 +
45434 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
45435 +static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
45436 +{
45437 + skb->nh.raw = skb->data + offset;
45438 +}
45439 +#endif
45440 +
45441 +static inline struct sk_buff *be_vlan_put_tag(struct sk_buff *skb,
45442 + unsigned short vlan_tag)
45443 +{
45444 + struct sk_buff *new_skb = __vlan_put_tag(skb, vlan_tag);
45445 + /* On kernel versions < 2.6.27 the __vlan_put_tag() function
45446 + * distorts the network layer hdr pointer in the skb which
45447 + * affects the detection of UDP/TCP packets down the line in
45448 + * wrb_fill_hdr().This work-around sets it right.
45449 + */
45450 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
45451 + skb_set_network_header(new_skb, VLAN_ETH_HLEN);
45452 +#endif
45453 + return new_skb;
45454 +}
45455 +
45456 +#ifndef ACCESS_ONCE
45457 +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
45458 +#endif
45459 +
45460 +#endif /* BE_COMPAT_H */
45461 diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
45462 index f0fd95b..37bad99 100644
45463 --- a/drivers/net/benet/be_ethtool.c
45464 +++ b/drivers/net/benet/be_ethtool.c
45465 @@ -1,18 +1,18 @@
45466 /*
45467 - * Copyright (C) 2005 - 2009 ServerEngines
45468 + * Copyright (C) 2005 - 2011 Emulex
45469 * All rights reserved.
45470 *
45471 * This program is free software; you can redistribute it and/or
45472 * modify it under the terms of the GNU General Public License version 2
45473 - * as published by the Free Software Foundation. The full GNU General
45474 + * as published by the Free Software Foundation. The full GNU General
45475 * Public License is included in this distribution in the file called COPYING.
45476 *
45477 * Contact Information:
45478 - * linux-drivers@serverengines.com
45479 + * linux-drivers@emulex.com
45480 *
45481 - * ServerEngines
45482 - * 209 N. Fair Oaks Ave
45483 - * Sunnyvale, CA 94085
45484 + * Emulex
45485 + * 3333 Susan Street
45486 + * Costa Mesa, CA 92626
45487 */
45488
45489 #include "be.h"
45490 @@ -26,21 +26,19 @@ struct be_ethtool_stat {
45491 int offset;
45492 };
45493
45494 -enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT, ERXSTAT};
45495 +enum {NETSTAT, DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
45496 #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
45497 offsetof(_struct, field)
45498 -#define NETSTAT_INFO(field) #field, NETSTAT,\
45499 +#define NETSTAT_INFO(field) #field, NETSTAT,\
45500 FIELDINFO(struct net_device_stats,\
45501 field)
45502 -#define DRVSTAT_INFO(field) #field, DRVSTAT,\
45503 - FIELDINFO(struct be_drvr_stats, field)
45504 -#define MISCSTAT_INFO(field) #field, MISCSTAT,\
45505 - FIELDINFO(struct be_rxf_stats, field)
45506 -#define PORTSTAT_INFO(field) #field, PORTSTAT,\
45507 - FIELDINFO(struct be_port_rxf_stats, \
45508 +#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
45509 + FIELDINFO(struct be_tx_stats, field)
45510 +#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
45511 + FIELDINFO(struct be_rx_stats, field)
45512 +#define DRVSTAT_INFO(field) #field, DRVSTAT,\
45513 + FIELDINFO(struct be_drv_stats, \
45514 field)
45515 -#define ERXSTAT_INFO(field) #field, ERXSTAT,\
45516 - FIELDINFO(struct be_erx_stats, field)
45517
45518 static const struct be_ethtool_stat et_stats[] = {
45519 {NETSTAT_INFO(rx_packets)},
45520 @@ -51,70 +49,131 @@ static const struct be_ethtool_stat et_stats[] = {
45521 {NETSTAT_INFO(tx_errors)},
45522 {NETSTAT_INFO(rx_dropped)},
45523 {NETSTAT_INFO(tx_dropped)},
45524 - {DRVSTAT_INFO(be_tx_reqs)},
45525 - {DRVSTAT_INFO(be_tx_stops)},
45526 - {DRVSTAT_INFO(be_fwd_reqs)},
45527 - {DRVSTAT_INFO(be_tx_wrbs)},
45528 - {DRVSTAT_INFO(be_polls)},
45529 {DRVSTAT_INFO(be_tx_events)},
45530 - {DRVSTAT_INFO(be_rx_events)},
45531 - {DRVSTAT_INFO(be_tx_compl)},
45532 - {DRVSTAT_INFO(be_rx_compl)},
45533 - {DRVSTAT_INFO(be_ethrx_post_fail)},
45534 - {DRVSTAT_INFO(be_802_3_dropped_frames)},
45535 - {DRVSTAT_INFO(be_802_3_malformed_frames)},
45536 - {DRVSTAT_INFO(be_tx_rate)},
45537 - {DRVSTAT_INFO(be_rx_rate)},
45538 - {PORTSTAT_INFO(rx_unicast_frames)},
45539 - {PORTSTAT_INFO(rx_multicast_frames)},
45540 - {PORTSTAT_INFO(rx_broadcast_frames)},
45541 - {PORTSTAT_INFO(rx_crc_errors)},
45542 - {PORTSTAT_INFO(rx_alignment_symbol_errors)},
45543 - {PORTSTAT_INFO(rx_pause_frames)},
45544 - {PORTSTAT_INFO(rx_control_frames)},
45545 - {PORTSTAT_INFO(rx_in_range_errors)},
45546 - {PORTSTAT_INFO(rx_out_range_errors)},
45547 - {PORTSTAT_INFO(rx_frame_too_long)},
45548 - {PORTSTAT_INFO(rx_address_match_errors)},
45549 - {PORTSTAT_INFO(rx_vlan_mismatch)},
45550 - {PORTSTAT_INFO(rx_dropped_too_small)},
45551 - {PORTSTAT_INFO(rx_dropped_too_short)},
45552 - {PORTSTAT_INFO(rx_dropped_header_too_small)},
45553 - {PORTSTAT_INFO(rx_dropped_tcp_length)},
45554 - {PORTSTAT_INFO(rx_dropped_runt)},
45555 - {PORTSTAT_INFO(rx_fifo_overflow)},
45556 - {PORTSTAT_INFO(rx_input_fifo_overflow)},
45557 - {PORTSTAT_INFO(rx_ip_checksum_errs)},
45558 - {PORTSTAT_INFO(rx_tcp_checksum_errs)},
45559 - {PORTSTAT_INFO(rx_udp_checksum_errs)},
45560 - {PORTSTAT_INFO(rx_non_rss_packets)},
45561 - {PORTSTAT_INFO(rx_ipv4_packets)},
45562 - {PORTSTAT_INFO(rx_ipv6_packets)},
45563 - {PORTSTAT_INFO(tx_unicastframes)},
45564 - {PORTSTAT_INFO(tx_multicastframes)},
45565 - {PORTSTAT_INFO(tx_broadcastframes)},
45566 - {PORTSTAT_INFO(tx_pauseframes)},
45567 - {PORTSTAT_INFO(tx_controlframes)},
45568 - {MISCSTAT_INFO(rx_drops_no_pbuf)},
45569 - {MISCSTAT_INFO(rx_drops_no_txpb)},
45570 - {MISCSTAT_INFO(rx_drops_no_erx_descr)},
45571 - {MISCSTAT_INFO(rx_drops_no_tpre_descr)},
45572 - {MISCSTAT_INFO(rx_drops_too_many_frags)},
45573 - {MISCSTAT_INFO(rx_drops_invalid_ring)},
45574 - {MISCSTAT_INFO(forwarded_packets)},
45575 - {MISCSTAT_INFO(rx_drops_mtu)},
45576 - {ERXSTAT_INFO(rx_drops_no_fragments)},
45577 + {DRVSTAT_INFO(rx_crc_errors)},
45578 + {DRVSTAT_INFO(rx_alignment_symbol_errors)},
45579 + {DRVSTAT_INFO(rx_pause_frames)},
45580 + {DRVSTAT_INFO(rx_control_frames)},
45581 + {DRVSTAT_INFO(rx_in_range_errors)},
45582 + {DRVSTAT_INFO(rx_out_range_errors)},
45583 + {DRVSTAT_INFO(rx_frame_too_long)},
45584 + {DRVSTAT_INFO(rx_address_match_errors)},
45585 + {DRVSTAT_INFO(rx_dropped_too_small)},
45586 + {DRVSTAT_INFO(rx_dropped_too_short)},
45587 + {DRVSTAT_INFO(rx_dropped_header_too_small)},
45588 + {DRVSTAT_INFO(rx_dropped_tcp_length)},
45589 + {DRVSTAT_INFO(rx_dropped_runt)},
45590 + {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
45591 + {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
45592 + {DRVSTAT_INFO(rx_ip_checksum_errs)},
45593 + {DRVSTAT_INFO(rx_tcp_checksum_errs)},
45594 + {DRVSTAT_INFO(rx_udp_checksum_errs)},
45595 + {DRVSTAT_INFO(rx_switched_unicast_packets)},
45596 + {DRVSTAT_INFO(rx_switched_multicast_packets)},
45597 + {DRVSTAT_INFO(rx_switched_broadcast_packets)},
45598 + {DRVSTAT_INFO(tx_pauseframes)},
45599 + {DRVSTAT_INFO(tx_controlframes)},
45600 + {DRVSTAT_INFO(rx_priority_pause_frames)},
45601 + {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
45602 + {DRVSTAT_INFO(jabber_events)},
45603 + {DRVSTAT_INFO(rx_drops_no_pbuf)},
45604 + {DRVSTAT_INFO(rx_drops_no_txpb)},
45605 + {DRVSTAT_INFO(rx_drops_no_erx_descr)},
45606 + {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
45607 + {DRVSTAT_INFO(rx_drops_too_many_frags)},
45608 + {DRVSTAT_INFO(rx_drops_invalid_ring)},
45609 + {DRVSTAT_INFO(forwarded_packets)},
45610 + {DRVSTAT_INFO(rx_drops_mtu)},
45611 + {DRVSTAT_INFO(eth_red_drops)},
45612 + {DRVSTAT_INFO(be_on_die_temperature)}
45613 };
45614 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
45615
45616 +/* Stats related to multi RX queues */
45617 +static const struct be_ethtool_stat et_rx_stats[] = {
45618 + {DRVSTAT_RX_INFO(rx_bytes)},
45619 + {DRVSTAT_RX_INFO(rx_pkts)},
45620 + {DRVSTAT_RX_INFO(rx_rate)},
45621 + {DRVSTAT_RX_INFO(rx_polls)},
45622 + {DRVSTAT_RX_INFO(rx_events)},
45623 + {DRVSTAT_RX_INFO(rx_compl)},
45624 + {DRVSTAT_RX_INFO(rx_mcast_pkts)},
45625 + {DRVSTAT_RX_INFO(rx_post_fail)},
45626 + {DRVSTAT_RX_INFO(rx_drops_no_frags)}
45627 +};
45628 +#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
45629 +
45630 +/* Stats related to multi TX queues */
45631 +static const struct be_ethtool_stat et_tx_stats[] = {
45632 + {DRVSTAT_TX_INFO(be_tx_rate)},
45633 + {DRVSTAT_TX_INFO(be_tx_reqs)},
45634 + {DRVSTAT_TX_INFO(be_tx_wrbs)},
45635 + {DRVSTAT_TX_INFO(be_tx_stops)},
45636 + {DRVSTAT_TX_INFO(be_tx_compl)},
45637 + {DRVSTAT_TX_INFO(be_ipv6_ext_hdr_tx_drop)}
45638 +};
45639 +#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
45640 +
45641 +static const char et_self_tests[][ETH_GSTRING_LEN] = {
45642 + "MAC Loopback test",
45643 + "PHY Loopback test",
45644 + "External Loopback test",
45645 + "DDR DMA test",
45646 + "Link test"
45647 +};
45648 +
45649 +#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
45650 +#define BE_MAC_LOOPBACK 0x0
45651 +#define BE_PHY_LOOPBACK 0x1
45652 +#define BE_ONE_PORT_EXT_LOOPBACK 0x2
45653 +#define BE_NO_LOOPBACK 0xff
45654 +
45655 +/* MAC speed valid values */
45656 +#define SPEED_DEFAULT 0x0
45657 +#define SPEED_FORCED_10GB 0x1
45658 +#define SPEED_FORCED_1GB 0x2
45659 +#define SPEED_AUTONEG_10GB 0x3
45660 +#define SPEED_AUTONEG_1GB 0x4
45661 +#define SPEED_AUTONEG_100MB 0x5
45662 +#define SPEED_AUTONEG_10GB_1GB 0x6
45663 +#define SPEED_AUTONEG_10GB_1GB_100MB 0x7
45664 +#define SPEED_AUTONEG_1GB_100MB 0x8
45665 +#define SPEED_AUTONEG_10MB 0x9
45666 +#define SPEED_AUTONEG_1GB_100MB_10MB 0xa
45667 +#define SPEED_AUTONEG_100MB_10MB 0xb
45668 +#define SPEED_FORCED_100MB 0xc
45669 +#define SPEED_FORCED_10MB 0xd
45670 +
45671 +
45672 +
45673 static void
45674 be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
45675 {
45676 struct be_adapter *adapter = netdev_priv(netdev);
45677 + int len;
45678 + char fw_on_flash[FW_VER_LEN];
45679 +
45680 + memset(fw_on_flash, 0 , sizeof(fw_on_flash));
45681 +
45682 + be_cmd_get_fw_ver(adapter, adapter->fw_ver,
45683 + fw_on_flash);
45684
45685 strcpy(drvinfo->driver, DRV_NAME);
45686 strcpy(drvinfo->version, DRV_VER);
45687 +
45688 strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
45689 + if (memcmp(adapter->fw_ver, fw_on_flash,
45690 + FW_VER_LEN) != 0) {
45691 + len = strlen(drvinfo->fw_version);
45692 + strncpy(drvinfo->fw_version+len, " [",
45693 + FW_VER_LEN-len-1);
45694 + len = strlen(drvinfo->fw_version);
45695 + strncpy(drvinfo->fw_version+len, fw_on_flash,
45696 + FW_VER_LEN-len-1);
45697 + len = strlen(drvinfo->fw_version);
45698 + strncpy(drvinfo->fw_version+len, "]", FW_VER_LEN-len-1);
45699 + }
45700 +
45701 strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
45702 drvinfo->testinfo_len = 0;
45703 drvinfo->regdump_len = 0;
45704 @@ -122,12 +181,37 @@ be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
45705 }
45706
45707 static int
45708 +be_get_reg_len(struct net_device *netdev)
45709 +{
45710 + struct be_adapter *adapter = netdev_priv(netdev);
45711 + u32 log_size = 0;
45712 +
45713 + if (be_physfn(adapter))
45714 + be_cmd_get_reg_len(adapter, &log_size);
45715 +
45716 + return log_size;
45717 +}
45718 +
45719 +static void
45720 +be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
45721 +{
45722 + struct be_adapter *adapter = netdev_priv(netdev);
45723 +
45724 + if (be_physfn(adapter)) {
45725 + memset(buf, 0, regs->len);
45726 + be_cmd_get_regs(adapter, regs->len, buf);
45727 + }
45728 +}
45729 +
45730 +static int
45731 be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
45732 {
45733 struct be_adapter *adapter = netdev_priv(netdev);
45734 - struct be_eq_obj *rx_eq = &adapter->rx_eq;
45735 + struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq;
45736 struct be_eq_obj *tx_eq = &adapter->tx_eq;
45737
45738 + coalesce->rx_max_coalesced_frames = adapter->max_rx_coal;
45739 +
45740 coalesce->rx_coalesce_usecs = rx_eq->cur_eqd;
45741 coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd;
45742 coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd;
45743 @@ -149,25 +233,52 @@ static int
45744 be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
45745 {
45746 struct be_adapter *adapter = netdev_priv(netdev);
45747 - struct be_eq_obj *rx_eq = &adapter->rx_eq;
45748 + struct be_rx_obj *rxo;
45749 + struct be_eq_obj *rx_eq;
45750 struct be_eq_obj *tx_eq = &adapter->tx_eq;
45751 u32 tx_max, tx_min, tx_cur;
45752 u32 rx_max, rx_min, rx_cur;
45753 - int status = 0;
45754 + int status = 0, i;
45755
45756 if (coalesce->use_adaptive_tx_coalesce == 1)
45757 return -EINVAL;
45758 + adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
45759 + if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME)
45760 + adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
45761
45762 - /* if AIC is being turned on now, start with an EQD of 0 */
45763 - if (rx_eq->enable_aic == 0 &&
45764 - coalesce->use_adaptive_rx_coalesce == 1) {
45765 - rx_eq->cur_eqd = 0;
45766 + for_all_rx_queues(adapter, rxo, i) {
45767 + rx_eq = &rxo->rx_eq;
45768 +
45769 + if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce)
45770 + rx_eq->cur_eqd = 0;
45771 + rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
45772 +
45773 + rx_max = coalesce->rx_coalesce_usecs_high;
45774 + rx_min = coalesce->rx_coalesce_usecs_low;
45775 + rx_cur = coalesce->rx_coalesce_usecs;
45776 +
45777 + if (rx_eq->enable_aic) {
45778 + if (rx_max > BE_MAX_EQD)
45779 + rx_max = BE_MAX_EQD;
45780 + if (rx_min > rx_max)
45781 + rx_min = rx_max;
45782 + rx_eq->max_eqd = rx_max;
45783 + rx_eq->min_eqd = rx_min;
45784 + if (rx_eq->cur_eqd > rx_max)
45785 + rx_eq->cur_eqd = rx_max;
45786 + if (rx_eq->cur_eqd < rx_min)
45787 + rx_eq->cur_eqd = rx_min;
45788 + } else {
45789 + if (rx_cur > BE_MAX_EQD)
45790 + rx_cur = BE_MAX_EQD;
45791 + if (rx_eq->cur_eqd != rx_cur) {
45792 + status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
45793 + rx_cur);
45794 + if (!status)
45795 + rx_eq->cur_eqd = rx_cur;
45796 + }
45797 + }
45798 }
45799 - rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
45800 -
45801 - rx_max = coalesce->rx_coalesce_usecs_high;
45802 - rx_min = coalesce->rx_coalesce_usecs_low;
45803 - rx_cur = coalesce->rx_coalesce_usecs;
45804
45805 tx_max = coalesce->tx_coalesce_usecs_high;
45806 tx_min = coalesce->tx_coalesce_usecs_low;
45807 @@ -181,27 +292,6 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
45808 tx_eq->cur_eqd = tx_cur;
45809 }
45810
45811 - if (rx_eq->enable_aic) {
45812 - if (rx_max > BE_MAX_EQD)
45813 - rx_max = BE_MAX_EQD;
45814 - if (rx_min > rx_max)
45815 - rx_min = rx_max;
45816 - rx_eq->max_eqd = rx_max;
45817 - rx_eq->min_eqd = rx_min;
45818 - if (rx_eq->cur_eqd > rx_max)
45819 - rx_eq->cur_eqd = rx_max;
45820 - if (rx_eq->cur_eqd < rx_min)
45821 - rx_eq->cur_eqd = rx_min;
45822 - } else {
45823 - if (rx_cur > BE_MAX_EQD)
45824 - rx_cur = BE_MAX_EQD;
45825 - if (rx_eq->cur_eqd != rx_cur) {
45826 - status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
45827 - rx_cur);
45828 - if (!status)
45829 - rx_eq->cur_eqd = rx_cur;
45830 - }
45831 - }
45832 return 0;
45833 }
45834
45835 @@ -229,81 +319,294 @@ be_get_ethtool_stats(struct net_device *netdev,
45836 struct ethtool_stats *stats, uint64_t *data)
45837 {
45838 struct be_adapter *adapter = netdev_priv(netdev);
45839 - struct be_drvr_stats *drvr_stats = &adapter->stats.drvr_stats;
45840 - struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
45841 - struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
45842 - struct be_port_rxf_stats *port_stats =
45843 - &rxf_stats->port[adapter->port_num];
45844 - struct net_device_stats *net_stats = &adapter->stats.net_stats;
45845 - struct be_erx_stats *erx_stats = &hw_stats->erx;
45846 + struct be_rx_obj *rxo;
45847 + struct be_tx_obj *txo;
45848 void *p = NULL;
45849 - int i;
45850 + int i, j, base;
45851
45852 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
45853 switch (et_stats[i].type) {
45854 case NETSTAT:
45855 - p = net_stats;
45856 + p = &adapter->net_stats;
45857 break;
45858 case DRVSTAT:
45859 - p = drvr_stats;
45860 - break;
45861 - case PORTSTAT:
45862 - p = port_stats;
45863 - break;
45864 - case MISCSTAT:
45865 - p = rxf_stats;
45866 - break;
45867 - case ERXSTAT: /* Currently only one ERX stat is provided */
45868 - p = (u32 *)erx_stats + adapter->rx_obj.q.id;
45869 + p = &adapter->drv_stats;
45870 break;
45871 }
45872
45873 p = (u8 *)p + et_stats[i].offset;
45874 data[i] = (et_stats[i].size == sizeof(u64)) ?
45875 - *(u64 *)p: *(u32 *)p;
45876 + *(u64 *)p:(*(u32 *)p);
45877 }
45878
45879 - return;
45880 + base = ETHTOOL_STATS_NUM;
45881 + for_all_rx_queues(adapter, rxo, j) {
45882 + for (i = 0; i < ETHTOOL_RXSTATS_NUM; i++) {
45883 + p = (u8 *)&rxo->stats + et_rx_stats[i].offset;
45884 + data[base + j * ETHTOOL_RXSTATS_NUM + i] =
45885 + (et_rx_stats[i].size == sizeof(u64)) ?
45886 + *(u64 *)p: *(u32 *)p;
45887 + }
45888 + }
45889 +
45890 + base = ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM;
45891 + for_all_tx_queues(adapter, txo, j) {
45892 + for (i = 0; i < ETHTOOL_TXSTATS_NUM; i++) {
45893 + p = (u8 *)&txo->stats + et_tx_stats[i].offset;
45894 + data[base + j * ETHTOOL_TXSTATS_NUM + i] =
45895 + (et_tx_stats[i].size == sizeof(u64)) ?
45896 + *(u64 *)p: *(u32 *)p;
45897 + }
45898 + }
45899 }
45900
45901 static void
45902 be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
45903 uint8_t *data)
45904 {
45905 - int i;
45906 + struct be_adapter *adapter = netdev_priv(netdev);
45907 + int i, j;
45908 +
45909 switch (stringset) {
45910 case ETH_SS_STATS:
45911 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
45912 memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
45913 data += ETH_GSTRING_LEN;
45914 }
45915 + for (i = 0; i < adapter->num_rx_qs; i++) {
45916 + for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
45917 + sprintf(data, "rxq%d: %s", i,
45918 + et_rx_stats[j].desc);
45919 + data += ETH_GSTRING_LEN;
45920 + }
45921 + }
45922 + for (i = 0; i < adapter->num_tx_qs; i++) {
45923 + for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
45924 + sprintf(data, "txq%d: %s", i,
45925 + et_tx_stats[j].desc);
45926 + data += ETH_GSTRING_LEN;
45927 + }
45928 + }
45929 + break;
45930 + case ETH_SS_TEST:
45931 + for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
45932 + memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
45933 + data += ETH_GSTRING_LEN;
45934 + }
45935 break;
45936 }
45937 }
45938
45939 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
45940 static int be_get_stats_count(struct net_device *netdev)
45941 {
45942 - return ETHTOOL_STATS_NUM;
45943 + struct be_adapter *adapter = netdev_priv(netdev);
45944 +
45945 + return ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM
45946 + + adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
45947 }
45948 +static int
45949 +be_self_test_count(struct net_device *dev)
45950 +{
45951 + return ETHTOOL_TESTS_NUM;
45952 +}
45953 +#else
45954 +
45955 +static int be_get_sset_count(struct net_device *netdev, int stringset)
45956 +{
45957 + struct be_adapter *adapter = netdev_priv(netdev);
45958 +
45959 + switch (stringset) {
45960 + case ETH_SS_TEST:
45961 + return ETHTOOL_TESTS_NUM;
45962 + case ETH_SS_STATS:
45963 + return ETHTOOL_STATS_NUM +
45964 + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
45965 + adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
45966 + default:
45967 + return -EINVAL;
45968 + }
45969 +}
45970 +#endif
45971
45972 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
45973 {
45974 - ecmd->speed = SPEED_10000;
45975 + struct be_adapter *adapter = netdev_priv(netdev);
45976 + struct be_phy_info phy_info;
45977 + u8 mac_speed = 0;
45978 + u16 link_speed = 0;
45979 + int link_status = LINK_DOWN;
45980 + int status;
45981 +
45982 + if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
45983 + status = be_cmd_link_status_query(adapter, &link_status,
45984 + &mac_speed, &link_speed, 0);
45985 +
45986 + be_link_status_update(adapter, link_status);
45987 + /* link_speed is in units of 10 Mbps */
45988 + if (link_speed) {
45989 + ecmd->speed = link_speed*10;
45990 + } else {
45991 + switch (mac_speed) {
45992 + case PHY_LINK_SPEED_10MBPS:
45993 + ecmd->speed = SPEED_10;
45994 + break;
45995 + case PHY_LINK_SPEED_100MBPS:
45996 + ecmd->speed = SPEED_100;
45997 + break;
45998 + case PHY_LINK_SPEED_1GBPS:
45999 + ecmd->speed = SPEED_1000;
46000 + break;
46001 + case PHY_LINK_SPEED_10GBPS:
46002 + ecmd->speed = SPEED_10000;
46003 + break;
46004 + case PHY_LINK_SPEED_ZERO:
46005 + ecmd->speed = 0;
46006 + break;
46007 + }
46008 + }
46009 +
46010 + status = be_cmd_get_phy_info(adapter, &phy_info);
46011 + if (!status) {
46012 + switch (phy_info.interface_type) {
46013 + case PHY_TYPE_XFP_10GB:
46014 + case PHY_TYPE_SFP_1GB:
46015 + case PHY_TYPE_SFP_PLUS_10GB:
46016 + ecmd->port = PORT_FIBRE;
46017 + break;
46018 + default:
46019 + ecmd->port = PORT_TP;
46020 + break;
46021 + }
46022 +
46023 + switch (phy_info.interface_type) {
46024 + case PHY_TYPE_KR_10GB:
46025 + case PHY_TYPE_KX4_10GB:
46026 + ecmd->transceiver = XCVR_INTERNAL;
46027 + break;
46028 + default:
46029 + ecmd->transceiver = XCVR_EXTERNAL;
46030 + break;
46031 + }
46032 +
46033 + if (phy_info.auto_speeds_supported) {
46034 + ecmd->supported |= SUPPORTED_Autoneg;
46035 + ecmd->autoneg = AUTONEG_ENABLE;
46036 + ecmd->advertising |= ADVERTISED_Autoneg;
46037 + }
46038 +
46039 + if (phy_info.misc_params & BE_PAUSE_SYM_EN) {
46040 + ecmd->supported |= SUPPORTED_Pause;
46041 + ecmd->advertising |= ADVERTISED_Pause;
46042 + }
46043 +
46044 + }
46045 +
46046 + /* Save for future use */
46047 + adapter->link_speed = ecmd->speed;
46048 + adapter->port_type = ecmd->port;
46049 + adapter->transceiver = ecmd->transceiver;
46050 + adapter->autoneg = ecmd->autoneg;
46051 + } else {
46052 + ecmd->speed = adapter->link_speed;
46053 + ecmd->port = adapter->port_type;
46054 + ecmd->transceiver = adapter->transceiver;
46055 + ecmd->autoneg = adapter->autoneg;
46056 + }
46057 +
46058 ecmd->duplex = DUPLEX_FULL;
46059 - ecmd->autoneg = AUTONEG_DISABLE;
46060 + ecmd->phy_address = (adapter->hba_port_num << 4) |
46061 + (adapter->port_name[adapter->hba_port_num]);
46062 + switch (ecmd->port) {
46063 + case PORT_FIBRE:
46064 + ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
46065 + break;
46066 + case PORT_TP:
46067 + ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
46068 + break;
46069 + }
46070 +
46071 + if (ecmd->autoneg) {
46072 + ecmd->supported |= SUPPORTED_1000baseT_Full;
46073 + ecmd->advertising |= (ADVERTISED_10000baseT_Full |
46074 + ADVERTISED_1000baseT_Full);
46075 + }
46076 +
46077 return 0;
46078 }
46079
46080 +static int be_set_settings(struct net_device *netdev,
46081 + struct ethtool_cmd *ecmd)
46082 +{
46083 + struct be_adapter *adapter = netdev_priv(netdev);
46084 + struct be_phy_info phy_info;
46085 + u16 mac_speed=0;
46086 + u16 dac_cable_len=0;
46087 + u16 port_speed = 0;
46088 + int status;
46089 +
46090 + status = be_cmd_get_phy_info(adapter, &phy_info);
46091 + if (status) {
46092 + dev_warn(&adapter->pdev->dev, "port speed set failed.\n");
46093 + return status;
46094 + }
46095 +
46096 + if (ecmd->autoneg == AUTONEG_ENABLE) {
46097 + switch(phy_info.interface_type) {
46098 + case PHY_TYPE_SFP_1GB:
46099 + case PHY_TYPE_BASET_1GB:
46100 + case PHY_TYPE_BASEX_1GB:
46101 + case PHY_TYPE_SGMII:
46102 + mac_speed = SPEED_AUTONEG_1GB_100MB_10MB;
46103 + break;
46104 + case PHY_TYPE_SFP_PLUS_10GB:
46105 + dev_warn(&adapter->pdev->dev,
46106 + "Autoneg not supported on this module. \n");
46107 + return -EINVAL;
46108 + case PHY_TYPE_KR_10GB:
46109 + case PHY_TYPE_KX4_10GB:
46110 + mac_speed = SPEED_AUTONEG_10GB_1GB;
46111 + break;
46112 + case PHY_TYPE_BASET_10GB:
46113 + mac_speed = SPEED_AUTONEG_10GB_1GB_100MB;
46114 + break;
46115 + }
46116 + } else if(ecmd->autoneg == AUTONEG_DISABLE) {
46117 + if(ecmd->speed == SPEED_10) {
46118 + mac_speed = SPEED_FORCED_10MB;
46119 + } else if(ecmd->speed == SPEED_100) {
46120 + mac_speed = SPEED_FORCED_100MB;
46121 + } else if(ecmd->speed == SPEED_1000) {
46122 + mac_speed = SPEED_FORCED_1GB;
46123 + } else if(ecmd->speed == SPEED_10000) {
46124 + mac_speed = SPEED_FORCED_10GB;
46125 + }
46126 + }
46127 +
46128 + status = be_cmd_get_port_speed(adapter, adapter->hba_port_num,
46129 + &dac_cable_len, &port_speed);
46130 +
46131 + if (!status && port_speed != mac_speed)
46132 + status = be_cmd_set_port_speed_v1(adapter,
46133 + adapter->hba_port_num, mac_speed,
46134 + dac_cable_len);
46135 + if (status)
46136 + dev_warn(&adapter->pdev->dev, "port speed set failed.\n");
46137 +
46138 + return status;
46139 +
46140 +}
46141 +
46142 static void
46143 be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
46144 {
46145 struct be_adapter *adapter = netdev_priv(netdev);
46146
46147 - ring->rx_max_pending = adapter->rx_obj.q.len;
46148 - ring->tx_max_pending = adapter->tx_obj.q.len;
46149 + ring->rx_max_pending = adapter->rx_obj[0].q.len;
46150 + ring->tx_max_pending = adapter->tx_obj[0].q.len;
46151
46152 - ring->rx_pending = atomic_read(&adapter->rx_obj.q.used);
46153 - ring->tx_pending = atomic_read(&adapter->tx_obj.q.used);
46154 + ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
46155 + ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
46156 }
46157
46158 static void
46159 @@ -312,7 +615,7 @@ be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
46160 struct be_adapter *adapter = netdev_priv(netdev);
46161
46162 be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
46163 - ecmd->autoneg = 0;
46164 + ecmd->autoneg = adapter->autoneg;
46165 }
46166
46167 static int
46168 @@ -334,6 +637,203 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
46169 return status;
46170 }
46171
46172 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
46173 +static int
46174 +be_phys_id(struct net_device *netdev, u32 data)
46175 +{
46176 + struct be_adapter *adapter = netdev_priv(netdev);
46177 + int status;
46178 + u32 cur;
46179 +
46180 + be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
46181 +
46182 + if (cur == BEACON_STATE_ENABLED)
46183 + return 0;
46184 +
46185 + if (data < 2)
46186 + data = 2;
46187 +
46188 + status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46189 + BEACON_STATE_ENABLED);
46190 + set_current_state(TASK_INTERRUPTIBLE);
46191 + schedule_timeout(data*HZ);
46192 +
46193 + status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46194 + BEACON_STATE_DISABLED);
46195 +
46196 + return status;
46197 +}
46198 +#else
46199 +static int
46200 +be_set_phys_id(struct net_device *netdev,
46201 + enum ethtool_phys_id_state state)
46202 +{
46203 + struct be_adapter *adapter = netdev_priv(netdev);
46204 +
46205 + switch (state) {
46206 + case ETHTOOL_ID_ACTIVE:
46207 + be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
46208 + &adapter->beacon_state);
46209 + return 1; /* cycle on/off once per second */
46210 +
46211 + case ETHTOOL_ID_ON:
46212 + be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46213 + BEACON_STATE_ENABLED);
46214 + break;
46215 +
46216 + case ETHTOOL_ID_OFF:
46217 + be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46218 + BEACON_STATE_DISABLED);
46219 + break;
46220 +
46221 + case ETHTOOL_ID_INACTIVE:
46222 + be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46223 + adapter->beacon_state);
46224 + }
46225 +
46226 + return 0;
46227 +}
46228 +#endif
46229 +
46230 +static bool
46231 +be_is_wol_supported(struct be_adapter *adapter)
46232 +{
46233 + struct pci_dev *pdev = adapter->pdev;
46234 +
46235 + if (!be_physfn(adapter))
46236 + return false;
46237 +
46238 + switch (pdev->subsystem_device) {
46239 + case OC_SUBSYS_DEVICE_ID1:
46240 + case OC_SUBSYS_DEVICE_ID2:
46241 + case OC_SUBSYS_DEVICE_ID3:
46242 + case OC_SUBSYS_DEVICE_ID4:
46243 + return false;
46244 + default:
46245 + return true;
46246 + }
46247 +}
46248 +
46249 +static void
46250 +be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
46251 +{
46252 + struct be_adapter *adapter = netdev_priv(netdev);
46253 +
46254 + if (be_is_wol_supported(adapter))
46255 + wol->supported = WAKE_MAGIC;
46256 + if (adapter->wol)
46257 + wol->wolopts = WAKE_MAGIC;
46258 + else
46259 + wol->wolopts = 0;
46260 + memset(&wol->sopass, 0, sizeof(wol->sopass));
46261 +}
46262 +
46263 +static int
46264 +be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
46265 +{
46266 + struct be_adapter *adapter = netdev_priv(netdev);
46267 +
46268 + if (wol->wolopts & ~WAKE_MAGIC)
46269 + return -EOPNOTSUPP;
46270 +
46271 + if (!be_is_wol_supported(adapter)) {
46272 + dev_warn(&adapter->pdev->dev,
46273 + "WOL not supported for this subsystemid: %x\n",
46274 + adapter->pdev->subsystem_device);
46275 + return -EOPNOTSUPP;
46276 + }
46277 +
46278 + if (wol->wolopts & WAKE_MAGIC)
46279 + adapter->wol = true;
46280 + else
46281 + adapter->wol = false;
46282 +
46283 + return 0;
46284 +}
46285 +
46286 +static int
46287 +be_test_ddr_dma(struct be_adapter *adapter)
46288 +{
46289 + int ret, i;
46290 + struct be_dma_mem ddrdma_cmd;
46291 + u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
46292 +
46293 + ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
46294 + ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
46295 + &ddrdma_cmd.dma);
46296 + if (!ddrdma_cmd.va) {
46297 + dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
46298 + return -ENOMEM;
46299 + }
46300 +
46301 + for (i = 0; i < 2; i++) {
46302 + ret = be_cmd_ddr_dma_test(adapter, pattern[i],
46303 + 4096, &ddrdma_cmd);
46304 + if (ret != 0)
46305 + goto err;
46306 + }
46307 +
46308 +err:
46309 + pci_free_consistent(adapter->pdev, ddrdma_cmd.size,
46310 + ddrdma_cmd.va, ddrdma_cmd.dma);
46311 + return ret;
46312 +}
46313 +
46314 +static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
46315 + u64 *status)
46316 +{
46317 + be_cmd_set_loopback(adapter, adapter->hba_port_num,
46318 + loopback_type, 1);
46319 + *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
46320 + loopback_type, 1500,
46321 + 2, 0xabc);
46322 + be_cmd_set_loopback(adapter, adapter->hba_port_num,
46323 + BE_NO_LOOPBACK, 1);
46324 + return *status;
46325 +}
46326 +
46327 +static void
46328 +be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
46329 +{
46330 + struct be_adapter *adapter = netdev_priv(netdev);
46331 + int link_status;
46332 + u8 mac_speed = 0;
46333 + u16 qos_link_speed = 0;
46334 +
46335 + memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
46336 +
46337 + if (test->flags & ETH_TEST_FL_OFFLINE) {
46338 + if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
46339 + &data[0]) != 0) {
46340 + test->flags |= ETH_TEST_FL_FAILED;
46341 + }
46342 + if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
46343 + &data[1]) != 0) {
46344 + test->flags |= ETH_TEST_FL_FAILED;
46345 + }
46346 + if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
46347 + &data[2]) != 0) {
46348 + test->flags |= ETH_TEST_FL_FAILED;
46349 + }
46350 + }
46351 +
46352 + if (be_test_ddr_dma(adapter) != 0) {
46353 + data[3] = 1;
46354 + test->flags |= ETH_TEST_FL_FAILED;
46355 + }
46356 +
46357 + if (be_cmd_link_status_query(adapter, &link_status, &mac_speed,
46358 + &qos_link_speed, 0) != 0) {
46359 + test->flags |= ETH_TEST_FL_FAILED;
46360 + data[4] = -1;
46361 + } else if (!mac_speed) {
46362 + test->flags |= ETH_TEST_FL_FAILED;
46363 + data[4] = 1;
46364 + }
46365 +
46366 +}
46367 +
46368 +#ifdef HAVE_ETHTOOL_FLASH
46369 static int
46370 be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
46371 {
46372 @@ -347,11 +847,73 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
46373
46374 return be_load_fw(adapter, file_name);
46375 }
46376 +#endif
46377
46378 -const struct ethtool_ops be_ethtool_ops = {
46379 +static int
46380 +be_get_eeprom_len(struct net_device *netdev)
46381 +{
46382 + return BE_READ_SEEPROM_LEN;
46383 +}
46384 +
46385 +static int
46386 +be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
46387 + uint8_t *data)
46388 +{
46389 + struct be_adapter *adapter = netdev_priv(netdev);
46390 + struct be_dma_mem eeprom_cmd;
46391 + struct be_cmd_resp_seeprom_read *resp;
46392 + int status;
46393 +
46394 + if (!eeprom->len)
46395 + return -EINVAL;
46396 +
46397 + eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
46398 +
46399 + memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
46400 + eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
46401 + eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
46402 + &eeprom_cmd.dma);
46403 +
46404 + if (!eeprom_cmd.va) {
46405 + dev_err(&adapter->pdev->dev,
46406 + "Memory allocation failure. Could not read eeprom\n");
46407 + return -ENOMEM;
46408 + }
46409 +
46410 + status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
46411 +
46412 + if (!status) {
46413 + resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
46414 + memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
46415 + }
46416 + pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
46417 + eeprom_cmd.dma);
46418 +
46419 + return status;
46420 +}
46421 +
46422 +static int be_set_tso(struct net_device *netdev, uint32_t data)
46423 +{
46424 + if (data) {
46425 + netdev->features |= NETIF_F_TSO;
46426 + netdev->features |= NETIF_F_TSO6;
46427 + } else {
46428 + netdev->features &= ~NETIF_F_TSO;
46429 + netdev->features &= ~NETIF_F_TSO6;
46430 + }
46431 + return 0;
46432 +}
46433 +
46434 +
46435 +struct ethtool_ops be_ethtool_ops = {
46436 .get_settings = be_get_settings,
46437 + .set_settings = be_set_settings,
46438 .get_drvinfo = be_get_drvinfo,
46439 + .get_wol = be_get_wol,
46440 + .set_wol = be_set_wol,
46441 .get_link = ethtool_op_get_link,
46442 + .get_eeprom_len = be_get_eeprom_len,
46443 + .get_eeprom = be_read_eeprom,
46444 .get_coalesce = be_get_coalesce,
46445 .set_coalesce = be_set_coalesce,
46446 .get_ringparam = be_get_ringparam,
46447 @@ -364,9 +926,21 @@ const struct ethtool_ops be_ethtool_ops = {
46448 .get_sg = ethtool_op_get_sg,
46449 .set_sg = ethtool_op_set_sg,
46450 .get_tso = ethtool_op_get_tso,
46451 - .set_tso = ethtool_op_set_tso,
46452 + .set_tso = be_set_tso,
46453 .get_strings = be_get_stat_strings,
46454 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
46455 + .phys_id = be_phys_id,
46456 .get_stats_count = be_get_stats_count,
46457 + .self_test_count = be_self_test_count,
46458 +#else
46459 + .set_phys_id = be_set_phys_id,
46460 + .get_sset_count = be_get_sset_count,
46461 +#endif
46462 .get_ethtool_stats = be_get_ethtool_stats,
46463 + .get_regs_len = be_get_reg_len,
46464 + .get_regs = be_get_regs,
46465 +#ifdef HAVE_ETHTOOL_FLASH
46466 .flash_device = be_do_flash,
46467 +#endif
46468 + .self_test = be_self_test
46469 };
46470 diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
46471 index a3394b4..f871d8c 100644
46472 --- a/drivers/net/benet/be_hw.h
46473 +++ b/drivers/net/benet/be_hw.h
46474 @@ -1,18 +1,18 @@
46475 /*
46476 - * Copyright (C) 2005 - 2009 ServerEngines
46477 + * Copyright (C) 2005 - 2011 Emulex
46478 * All rights reserved.
46479 *
46480 * This program is free software; you can redistribute it and/or
46481 * modify it under the terms of the GNU General Public License version 2
46482 - * as published by the Free Software Foundation. The full GNU General
46483 + * as published by the Free Software Foundation. The full GNU General
46484 * Public License is included in this distribution in the file called COPYING.
46485 *
46486 * Contact Information:
46487 - * linux-drivers@serverengines.com
46488 + * linux-drivers@emulex.com
46489 *
46490 - * ServerEngines
46491 - * 209 N. Fair Oaks Ave
46492 - * Sunnyvale, CA 94085
46493 + * Emulex
46494 + * 3333 Susan Street
46495 + * Costa Mesa, CA 92626
46496 */
46497
46498 /********* Mailbox door bell *************/
46499 @@ -26,24 +26,34 @@
46500 * queue entry.
46501 */
46502 #define MPU_MAILBOX_DB_OFFSET 0x160
46503 -#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
46504 +#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
46505 #define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
46506
46507 -#define MPU_EP_CONTROL 0
46508 +#define MPU_EP_CONTROL 0
46509
46510 /********** MPU semphore ******************/
46511 -#define MPU_EP_SEMAPHORE_OFFSET 0xac
46512 +#define MPU_EP_SEMAPHORE_OFFSET 0xac
46513 +#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
46514 #define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
46515 #define EP_SEMAPHORE_POST_ERR_MASK 0x1
46516 #define EP_SEMAPHORE_POST_ERR_SHIFT 31
46517 /* MPU semphore POST stage values */
46518 -#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
46519 -#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */
46520 +#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
46521 +#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */
46522 #define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
46523 #define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
46524
46525 +/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
46526 +#define SLIPORT_STATUS_OFFSET 0x404
46527 +#define SLIPORT_CONTROL_OFFSET 0x408
46528 +
46529 +#define SLIPORT_STATUS_ERR_MASK 0x80000000
46530 +#define SLIPORT_STATUS_RN_MASK 0x01000000
46531 +#define SLIPORT_STATUS_RDY_MASK 0x00800000
46532 +#define SLI_PORT_CONTROL_IP_MASK 0x08000000
46533 +
46534 /********* Memory BAR register ************/
46535 -#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
46536 +#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
46537 /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
46538 * Disable" may still globally block interrupts in addition to individual
46539 * interrupt masks; a mechanism for the device driver to block all interrupts
46540 @@ -52,13 +62,70 @@
46541 */
46542 #define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
46543
46544 +/********* Link Status CSR ****************/
46545 +#define PCICFG_PCIE_LINK_STATUS_OFFSET 0xd0
46546 +#define PCIE_LINK_STATUS_SPEED_MASK 0xFF /* bits 16 - 19 */
46547 +#define PCIE_LINK_STATUS_SPEED_SHIFT 16
46548 +#define PCIE_LINK_STATUS_NEG_WIDTH_MASK 0x3F /* bits 20 - 25 */
46549 +#define PCIE_LINK_STATUS_NEG_WIDTH_SHIFT 20
46550 +
46551 +/********* Link Capability CSR ************/
46552 +#define PCICFG_PCIE_LINK_CAP_OFFSET 0xcc
46553 +#define PCIE_LINK_CAP_MAX_SPEED_MASK 0xFF /* bits 0 - 3 */
46554 +#define PCIE_LINK_CAP_MAX_SPEED_SHIFT 0
46555 +#define PCIE_LINK_CAP_MAX_WIDTH_MASK 0x3F /* bits 4 - 9 */
46556 +#define PCIE_LINK_CAP_MAX_WIDTH_SHIFT 4
46557 +
46558 +/********* PCI Function Capability ************/
46559 +#define BE_FUNCTION_CAPS_UNCLASSIFIED_STATS 0x1
46560 +#define BE_FUNCTION_CAPS_RSS 0x2
46561 +#define BE_FUNCTION_CAPS_PROMISCUOUS 0x4
46562 +#define BE_FUNCTION_CAPS_LEGACY_MODE 0x8
46563 +
46564 +/********* Power managment (WOL) **********/
46565 +#define PCICFG_PM_CONTROL_OFFSET 0x44
46566 +#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */
46567 +
46568 +/********* Online Control Registers *******/
46569 +#define PCICFG_ONLINE0 0xB0
46570 +#define PCICFG_ONLINE1 0xB4
46571 +
46572 +/********* UE Status and Mask Registers ***/
46573 +#define PCICFG_UE_STATUS_LOW 0xA0
46574 +#define PCICFG_UE_STATUS_HIGH 0xA4
46575 +#define PCICFG_UE_STATUS_LOW_MASK 0xA8
46576 +#define PCICFG_UE_STATUS_HI_MASK 0xAC
46577 +
46578 +/******** SLI_INTF ***********************/
46579 +#define SLI_INTF_REG_OFFSET 0x58
46580 +#define SLI_INTF_VALID_MASK 0xE0000000
46581 +#define SLI_INTF_VALID 0xC0000000
46582 +#define SLI_INTF_HINT2_MASK 0x1F000000
46583 +#define SLI_INTF_HINT2_SHIFT 24
46584 +#define SLI_INTF_HINT1_MASK 0x00FF0000
46585 +#define SLI_INTF_HINT1_SHIFT 16
46586 +#define SLI_INTF_FAMILY_MASK 0x00000F00
46587 +#define SLI_INTF_FAMILY_SHIFT 8
46588 +#define SLI_INTF_IF_TYPE_MASK 0x0000F000
46589 +#define SLI_INTF_IF_TYPE_SHIFT 12
46590 +#define SLI_INTF_REV_MASK 0x000000F0
46591 +#define SLI_INTF_REV_SHIFT 4
46592 +#define SLI_INTF_FT_MASK 0x00000001
46593 +
46594 +/* SLI family */
46595 +#define BE_SLI_FAMILY 0x0
46596 +#define LANCER_A0_SLI_FAMILY 0xA
46597 +
46598 /********* ISR0 Register offset **********/
46599 -#define CEV_ISR0_OFFSET 0xC18
46600 +#define CEV_ISR0_OFFSET 0xC18
46601 #define CEV_ISR_SIZE 4
46602
46603 /********* Event Q door bell *************/
46604 #define DB_EQ_OFFSET DB_CQ_OFFSET
46605 #define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
46606 +#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
46607 +#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */
46608 +
46609 /* Clear the interrupt for this eq */
46610 #define DB_EQ_CLR_SHIFT (9) /* bit 9 */
46611 /* Must be 1 */
46612 @@ -69,12 +136,16 @@
46613 #define DB_EQ_REARM_SHIFT (29) /* bit 29 */
46614
46615 /********* Compl Q door bell *************/
46616 -#define DB_CQ_OFFSET 0x120
46617 +#define DB_CQ_OFFSET 0x120
46618 #define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
46619 +#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
46620 +#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14
46621 + placing at 11-15 */
46622 +
46623 /* Number of event entries processed */
46624 -#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
46625 +#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
46626 /* Rearm bit */
46627 -#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
46628 +#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
46629
46630 /********** TX ULP door bell *************/
46631 #define DB_TXULP1_OFFSET 0x60
46632 @@ -84,25 +155,103 @@
46633 #define DB_TXULP_NUM_POSTED_MASK 0x3FFF /* bits 16 - 29 */
46634
46635 /********** RQ(erx) door bell ************/
46636 -#define DB_RQ_OFFSET 0x100
46637 +#define DB_RQ_OFFSET 0x100
46638 #define DB_RQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
46639 /* Number of rx frags posted */
46640 #define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
46641
46642 /********** MCC door bell ************/
46643 -#define DB_MCCQ_OFFSET 0x140
46644 +#define DB_MCCQ_OFFSET 0x140
46645 #define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
46646 /* Number of entries posted */
46647 #define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
46648
46649 +/********** SRIOV VF PCICFG OFFSET ********/
46650 +#define SRIOV_VF_PCICFG_OFFSET (4096)
46651 +
46652 +/********** FAT TABLE ********/
46653 +#define RETRIEVE_FAT 0
46654 +#define QUERY_FAT 1
46655 +
46656 +/* Flashrom related descriptors */
46657 +#define IMAGE_TYPE_FIRMWARE 160
46658 +#define IMAGE_TYPE_BOOTCODE 224
46659 +#define IMAGE_TYPE_OPTIONROM 32
46660 +
46661 +#define NUM_FLASHDIR_ENTRIES 32
46662 +
46663 +#define IMG_TYPE_ISCSI_ACTIVE 0
46664 +#define IMG_TYPE_REDBOOT 1
46665 +#define IMG_TYPE_BIOS 2
46666 +#define IMG_TYPE_PXE_BIOS 3
46667 +#define IMG_TYPE_FCOE_BIOS 8
46668 +#define IMG_TYPE_ISCSI_BACKUP 9
46669 +#define IMG_TYPE_FCOE_FW_ACTIVE 10
46670 +#define IMG_TYPE_FCOE_FW_BACKUP 11
46671 +#define IMG_TYPE_NCSI_FW 13
46672 +#define IMG_TYPE_PHY_FW 99
46673 +#define TN_8022 13
46674 +
46675 +#define ILLEGAL_IOCTL_REQ 2
46676 +#define FLASHROM_OPER_PHY_FLASH 9
46677 +#define FLASHROM_OPER_PHY_SAVE 10
46678 +#define FLASHROM_OPER_FLASH 1
46679 +#define FLASHROM_OPER_SAVE 2
46680 +#define FLASHROM_OPER_REPORT 4
46681 +
46682 +#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image size */
46683 +#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM image sz */
46684 +#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
46685 +#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max firmware image size */
46686 +#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM image sz */
46687 +#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
46688 +#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 (262144)
46689 +#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 (262144)
46690 +
46691 +#define FLASH_NCSI_MAGIC (0x16032009)
46692 +#define FLASH_NCSI_DISABLED (0)
46693 +#define FLASH_NCSI_ENABLED (1)
46694 +
46695 +#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
46696 +
46697 +/* Offsets for components on Flash. */
46698 +#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
46699 +#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
46700 +#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
46701 +#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
46702 +#define FLASH_iSCSI_BIOS_START_g2 (7340032)
46703 +#define FLASH_PXE_BIOS_START_g2 (7864320)
46704 +#define FLASH_FCoE_BIOS_START_g2 (524288)
46705 +#define FLASH_REDBOOT_START_g2 (0)
46706 +
46707 +#define FLASH_NCSI_START_g3 (15990784)
46708 +#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
46709 +#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
46710 +#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
46711 +#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
46712 +#define FLASH_iSCSI_BIOS_START_g3 (12582912)
46713 +#define FLASH_PXE_BIOS_START_g3 (13107200)
46714 +#define FLASH_FCoE_BIOS_START_g3 (13631488)
46715 +#define FLASH_REDBOOT_START_g3 (262144)
46716 +#define FLASH_PHY_FW_START_g3 (1310720)
46717 +
46718 +/************* Rx Packet Type Encoding **************/
46719 +#define BE_UNICAST_PACKET 0
46720 +#define BE_MULTICAST_PACKET 1
46721 +#define BE_BROADCAST_PACKET 2
46722 +#define BE_RSVD_PACKET 3
46723 +
46724 /*
46725 * BE descriptors: host memory data structures whose formats
46726 * are hardwired in BE silicon.
46727 */
46728 /* Event Queue Descriptor */
46729 -#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
46730 -#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
46731 -#define EQ_ENTRY_RES_ID_SHIFT 16
46732 +#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
46733 +#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
46734 +#define EQ_ENTRY_RES_ID_SHIFT 16
46735 +
46736 +#define BE_MAC_PROMISCUOUS 62 /* Promiscuous mode */
46737 +
46738 struct be_eq_entry {
46739 u32 evt;
46740 };
46741 @@ -126,7 +275,7 @@ struct amap_eth_hdr_wrb {
46742 u8 event;
46743 u8 crc;
46744 u8 forward;
46745 - u8 ipsec;
46746 + u8 lso6;
46747 u8 mgmt;
46748 u8 ipcs;
46749 u8 udpcs;
46750 @@ -151,7 +300,7 @@ struct be_eth_hdr_wrb {
46751 * offset/shift/mask of each field */
46752 struct amap_eth_tx_compl {
46753 u8 wrb_index[16]; /* dword 0 */
46754 - u8 ct[2]; /* dword 0 */
46755 + u8 ct[2]; /* dword 0 */
46756 u8 port[2]; /* dword 0 */
46757 u8 rsvd0[8]; /* dword 0 */
46758 u8 status[4]; /* dword 0 */
46759 @@ -179,10 +328,10 @@ struct be_eth_rx_d {
46760
46761 /* RX Compl Queue Descriptor */
46762
46763 -/* Pseudo amap definition for eth_rx_compl in which each bit of the
46764 - * actual structure is defined as a byte: used to calculate
46765 +/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which
46766 + * each bit of the actual structure is defined as a byte: used to calculate
46767 * offset/shift/mask of each field */
46768 -struct amap_eth_rx_compl {
46769 +struct amap_eth_rx_compl_v0 {
46770 u8 vlan_tag[16]; /* dword 0 */
46771 u8 pktsize[14]; /* dword 0 */
46772 u8 port; /* dword 0 */
46773 @@ -213,39 +362,91 @@ struct amap_eth_rx_compl {
46774 u8 rsshash[32]; /* dword 3 */
46775 } __packed;
46776
46777 +/* Pseudo amap definition for BE3 native mode eth_rx_compl in which
46778 + * each bit of the actual structure is defined as a byte: used to calculate
46779 + * offset/shift/mask of each field */
46780 +struct amap_eth_rx_compl_v1 {
46781 + u8 vlan_tag[16]; /* dword 0 */
46782 + u8 pktsize[14]; /* dword 0 */
46783 + u8 vtp; /* dword 0 */
46784 + u8 ip_opt; /* dword 0 */
46785 + u8 err; /* dword 1 */
46786 + u8 rsshp; /* dword 1 */
46787 + u8 ipf; /* dword 1 */
46788 + u8 tcpf; /* dword 1 */
46789 + u8 udpf; /* dword 1 */
46790 + u8 ipcksm; /* dword 1 */
46791 + u8 l4_cksm; /* dword 1 */
46792 + u8 ip_version; /* dword 1 */
46793 + u8 macdst[7]; /* dword 1 */
46794 + u8 rsvd0; /* dword 1 */
46795 + u8 fragndx[10]; /* dword 1 */
46796 + u8 ct[2]; /* dword 1 */
46797 + u8 sw; /* dword 1 */
46798 + u8 numfrags[3]; /* dword 1 */
46799 + u8 rss_flush; /* dword 2 */
46800 + u8 cast_enc[2]; /* dword 2 */
46801 + u8 vtm; /* dword 2 */
46802 + u8 rss_bank; /* dword 2 */
46803 + u8 port[2]; /* dword 2 */
46804 + u8 vntagp; /* dword 2 */
46805 + u8 header_len[8]; /* dword 2 */
46806 + u8 header_split[2]; /* dword 2 */
46807 + u8 rsvd1[13]; /* dword 2 */
46808 + u8 valid; /* dword 2 */
46809 + u8 rsshash[32]; /* dword 3 */
46810 +} __packed;
46811 +
46812 struct be_eth_rx_compl {
46813 u32 dw[4];
46814 };
46815
46816 -/* Flashrom related descriptors */
46817 -#define IMAGE_TYPE_FIRMWARE 160
46818 -#define IMAGE_TYPE_BOOTCODE 224
46819 -#define IMAGE_TYPE_OPTIONROM 32
46820 +struct mgmt_hba_attribs {
46821 + u8 flashrom_version_string[32];
46822 + u8 manufacturer_name[32];
46823 + u32 supported_modes;
46824 + u32 rsvd0[3];
46825 + u8 ncsi_ver_string[12];
46826 + u32 default_extended_timeout;
46827 + u8 controller_model_number[32];
46828 + u8 controller_description[64];
46829 + u8 controller_serial_number[32];
46830 + u8 ip_version_string[32];
46831 + u8 firmware_version_string[32];
46832 + u8 bios_version_string[32];
46833 + u8 redboot_version_string[32];
46834 + u8 driver_version_string[32];
46835 + u8 fw_on_flash_version_string[32];
46836 + u32 functionalities_supported;
46837 + u16 max_cdblength;
46838 + u8 asic_revision;
46839 + u8 generational_guid[16];
46840 + u8 hba_port_count;
46841 + u16 default_link_down_timeout;
46842 + u8 iscsi_ver_min_max;
46843 + u8 multifunction_device;
46844 + u8 cache_valid;
46845 + u8 hba_status;
46846 + u8 max_domains_supported;
46847 + u8 phy_port;
46848 + u32 firmware_post_status;
46849 + u32 hba_mtu[8];
46850 + u32 rsvd1[4];
46851 +};
46852
46853 -#define NUM_FLASHDIR_ENTRIES 32
46854 -
46855 -#define FLASHROM_TYPE_ISCSI_ACTIVE 0
46856 -#define FLASHROM_TYPE_BIOS 2
46857 -#define FLASHROM_TYPE_PXE_BIOS 3
46858 -#define FLASHROM_TYPE_FCOE_BIOS 8
46859 -#define FLASHROM_TYPE_ISCSI_BACKUP 9
46860 -#define FLASHROM_TYPE_FCOE_FW_ACTIVE 10
46861 -#define FLASHROM_TYPE_FCOE_FW_BACKUP 11
46862 -
46863 -#define FLASHROM_OPER_FLASH 1
46864 -#define FLASHROM_OPER_SAVE 2
46865 -
46866 -#define FLASH_IMAGE_MAX_SIZE (1310720) /* Max firmware image size */
46867 -#define FLASH_BIOS_IMAGE_MAX_SIZE (262144) /* Max OPTION ROM image sz */
46868 -
46869 -/* Offsets for components on Flash. */
46870 -#define FLASH_iSCSI_PRIMARY_IMAGE_START (1048576)
46871 -#define FLASH_iSCSI_BACKUP_IMAGE_START (2359296)
46872 -#define FLASH_FCoE_PRIMARY_IMAGE_START (3670016)
46873 -#define FLASH_FCoE_BACKUP_IMAGE_START (4980736)
46874 -#define FLASH_iSCSI_BIOS_START (7340032)
46875 -#define FLASH_PXE_BIOS_START (7864320)
46876 -#define FLASH_FCoE_BIOS_START (524288)
46877 +struct mgmt_controller_attrib {
46878 + struct mgmt_hba_attribs hba_attribs;
46879 + u16 pci_vendor_id;
46880 + u16 pci_device_id;
46881 + u16 pci_sub_vendor_id;
46882 + u16 pci_sub_system_id;
46883 + u8 pci_bus_number;
46884 + u8 pci_device_number;
46885 + u8 pci_function_number;
46886 + u8 interface_type;
46887 + u64 unique_identifier;
46888 + u32 rsvd0[5];
46889 +};
46890
46891 struct controller_id {
46892 u32 vendor;
46893 @@ -254,7 +455,20 @@ struct controller_id {
46894 u32 subdevice;
46895 };
46896
46897 -struct flash_file_hdr {
46898 +struct flash_comp {
46899 + unsigned long offset;
46900 + int optype;
46901 + int size;
46902 +};
46903 +
46904 +struct image_hdr {
46905 + u32 imageid;
46906 + u32 imageoffset;
46907 + u32 imagelength;
46908 + u32 image_checksum;
46909 + u8 image_version[32];
46910 +};
46911 +struct flash_file_hdr_g2 {
46912 u8 sign[32];
46913 u32 cksum;
46914 u32 antidote;
46915 @@ -266,6 +480,17 @@ struct flash_file_hdr {
46916 u8 build[24];
46917 };
46918
46919 +struct flash_file_hdr_g3 {
46920 + u8 sign[52];
46921 + u8 ufi_version[4];
46922 + u32 file_len;
46923 + u32 cksum;
46924 + u32 antidote;
46925 + u32 num_imgs;
46926 + u8 build[24];
46927 + u8 rsvd[32];
46928 +};
46929 +
46930 struct flash_section_hdr {
46931 u32 format_rev;
46932 u32 cksum;
46933 @@ -299,3 +524,19 @@ struct flash_section_info {
46934 struct flash_section_hdr fsec_hdr;
46935 struct flash_section_entry fsec_entry[32];
46936 };
46937 +
46938 +struct flash_ncsi_image_hdr {
46939 + u32 magic;
46940 + u8 hdr_len;
46941 + u8 type;
46942 + u16 hdr_ver;
46943 + u8 rsvd0[2];
46944 + u16 load_offset;
46945 + u32 len;
46946 + u32 flash_offset;
46947 + u8 ver[16];
46948 + u8 name[24];
46949 + u32 img_cksum;
46950 + u8 rsvd1[4];
46951 + u32 hdr_cksum;
46952 +};
46953 diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
46954 index 000e377..f501aa3 100644
46955 --- a/drivers/net/benet/be_main.c
46956 +++ b/drivers/net/benet/be_main.c
46957 @@ -1,18 +1,18 @@
46958 /*
46959 - * Copyright (C) 2005 - 2009 ServerEngines
46960 + * Copyright (C) 2005 - 2011 Emulex
46961 * All rights reserved.
46962 *
46963 * This program is free software; you can redistribute it and/or
46964 * modify it under the terms of the GNU General Public License version 2
46965 - * as published by the Free Software Foundation. The full GNU General
46966 + * as published by the Free Software Foundation. The full GNU General
46967 * Public License is included in this distribution in the file called COPYING.
46968 *
46969 * Contact Information:
46970 - * linux-drivers@serverengines.com
46971 + * linux-drivers@emulex.com
46972 *
46973 - * ServerEngines
46974 - * 209 N. Fair Oaks Ave
46975 - * Sunnyvale, CA 94085
46976 + * Emulex
46977 + * 3333 Susan Street
46978 + * Costa Mesa, CA 92626
46979 */
46980
46981 #include "be.h"
46982 @@ -22,23 +22,119 @@
46983 MODULE_VERSION(DRV_VER);
46984 MODULE_DEVICE_TABLE(pci, be_dev_ids);
46985 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
46986 -MODULE_AUTHOR("ServerEngines Corporation");
46987 +MODULE_AUTHOR("Emulex Corporation");
46988 MODULE_LICENSE("GPL");
46989 +MODULE_INFO(supported, "external");
46990
46991 -static unsigned int rx_frag_size = 2048;
46992 -module_param(rx_frag_size, uint, S_IRUGO);
46993 -MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
46994 +static ushort rx_frag_size = 2048;
46995 +static unsigned int num_vfs;
46996 +static unsigned int msix = 1;
46997 +module_param(rx_frag_size, ushort, S_IRUGO);
46998 +module_param(num_vfs, uint, S_IRUGO);
46999 +module_param(msix, uint, S_IRUGO);
47000 +MODULE_PARM_DESC(rx_frag_size, "Size of receive fragment buffer"
47001 + " - 2048 (default), 4096 or 8192");
47002 +MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
47003 +MODULE_PARM_DESC(msix, "Enable and disable the MSI"
47004 + "x (By default MSIx is enabled)");
47005 +static unsigned int gro = 1;
47006 +module_param(gro, uint, S_IRUGO);
47007 +MODULE_PARM_DESC(gro, "Enable or Disable GRO. Enabled by default");
47008 +
47009 +static unsigned int multi_rxq = true;
47010 +module_param(multi_rxq, uint, S_IRUGO);
47011 +MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
47012
47013 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
47014 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
47015 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
47016 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
47017 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
47018 - { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
47019 + /*
47020 + * Lancer is not part of Palau 4.0
47021 + * { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
47022 + */
47023 { 0 }
47024 };
47025 MODULE_DEVICE_TABLE(pci, be_dev_ids);
47026
47027 +/* UE Status Low CSR */
47028 +static char *ue_status_low_desc[] = {
47029 + "CEV",
47030 + "CTX",
47031 + "DBUF",
47032 + "ERX",
47033 + "Host",
47034 + "MPU",
47035 + "NDMA",
47036 + "PTC ",
47037 + "RDMA ",
47038 + "RXF ",
47039 + "RXIPS ",
47040 + "RXULP0 ",
47041 + "RXULP1 ",
47042 + "RXULP2 ",
47043 + "TIM ",
47044 + "TPOST ",
47045 + "TPRE ",
47046 + "TXIPS ",
47047 + "TXULP0 ",
47048 + "TXULP1 ",
47049 + "UC ",
47050 + "WDMA ",
47051 + "TXULP2 ",
47052 + "HOST1 ",
47053 + "P0_OB_LINK ",
47054 + "P1_OB_LINK ",
47055 + "HOST_GPIO ",
47056 + "MBOX ",
47057 + "AXGMAC0",
47058 + "AXGMAC1",
47059 + "JTAG",
47060 + "MPU_INTPEND"
47061 +};
47062 +
47063 +/* UE Status High CSR */
47064 +static char *ue_status_hi_desc[] = {
47065 + "LPCMEMHOST",
47066 + "MGMT_MAC",
47067 + "PCS0ONLINE",
47068 + "MPU_IRAM",
47069 + "PCS1ONLINE",
47070 + "PCTL0",
47071 + "PCTL1",
47072 + "PMEM",
47073 + "RR",
47074 + "TXPB",
47075 + "RXPP",
47076 + "XAUI",
47077 + "TXP",
47078 + "ARM",
47079 + "IPC",
47080 + "HOST2",
47081 + "HOST3",
47082 + "HOST4",
47083 + "HOST5",
47084 + "HOST6",
47085 + "HOST7",
47086 + "HOST8",
47087 + "HOST9",
47088 + "NETC",
47089 + "Unknown",
47090 + "Unknown",
47091 + "Unknown",
47092 + "Unknown",
47093 + "Unknown",
47094 + "Unknown",
47095 + "Unknown",
47096 + "Unknown"
47097 +};
47098 +
47099 +static inline bool be_multi_rxq(struct be_adapter *adapter)
47100 +{
47101 + return (adapter->num_rx_qs > 1);
47102 +}
47103 +
47104 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
47105 {
47106 struct be_dma_mem *mem = &q->dma_mem;
47107 @@ -69,6 +165,9 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
47108 u32 reg = ioread32(addr);
47109 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
47110
47111 + if (adapter->eeh_err)
47112 + return;
47113 +
47114 if (!enabled && enable)
47115 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
47116 else if (enabled && !enable)
47117 @@ -84,6 +183,8 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
47118 u32 val = 0;
47119 val |= qid & DB_RQ_RING_ID_MASK;
47120 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
47121 +
47122 + wmb();
47123 iowrite32(val, adapter->db + DB_RQ_OFFSET);
47124 }
47125
47126 @@ -92,6 +193,8 @@ static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
47127 u32 val = 0;
47128 val |= qid & DB_TXULP_RING_ID_MASK;
47129 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
47130 +
47131 + wmb();
47132 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
47133 }
47134
47135 @@ -100,6 +203,12 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
47136 {
47137 u32 val = 0;
47138 val |= qid & DB_EQ_RING_ID_MASK;
47139 + val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
47140 + DB_EQ_RING_ID_EXT_MASK_SHIFT);
47141 +
47142 + if (adapter->eeh_err)
47143 + return;
47144 +
47145 if (arm)
47146 val |= 1 << DB_EQ_REARM_SHIFT;
47147 if (clear_int)
47148 @@ -113,6 +222,12 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
47149 {
47150 u32 val = 0;
47151 val |= qid & DB_CQ_RING_ID_MASK;
47152 + val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
47153 + DB_CQ_RING_ID_EXT_MASK_SHIFT);
47154 +
47155 + if (adapter->eeh_err)
47156 + return;
47157 +
47158 if (arm)
47159 val |= 1 << DB_CQ_REARM_SHIFT;
47160 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
47161 @@ -124,96 +239,250 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
47162 struct be_adapter *adapter = netdev_priv(netdev);
47163 struct sockaddr *addr = p;
47164 int status = 0;
47165 + u8 current_mac[ETH_ALEN];
47166 + u32 pmac_id = adapter->pmac_id;
47167
47168 - status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
47169 + if (!is_valid_ether_addr(addr->sa_data))
47170 + return -EADDRNOTAVAIL;
47171 +
47172 + status = be_cmd_mac_addr_query(adapter, current_mac,
47173 + MAC_ADDRESS_TYPE_NETWORK, false,
47174 + adapter->if_handle);
47175 if (status)
47176 - return status;
47177 + goto err;
47178 +
47179 + if (!memcmp(addr->sa_data, current_mac, ETH_ALEN))
47180 + goto done;
47181
47182 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
47183 - adapter->if_handle, &adapter->pmac_id);
47184 - if (!status)
47185 + adapter->if_handle, &adapter->pmac_id, 0);
47186 +
47187 + if (!status) {
47188 + status = be_cmd_pmac_del(adapter, adapter->if_handle,
47189 + pmac_id, 0);
47190 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
47191 + goto done;
47192 + }
47193
47194 - return status;
47195 +err:
47196 + if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
47197 + return -EPERM;
47198 + else
47199 + dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n",
47200 + addr->sa_data);
47201 +done:
47202 + return status;
47203 +}
47204 +
47205 +static void populate_be2_stats(struct be_adapter *adapter)
47206 +{
47207 +
47208 + struct be_drv_stats *drvs = &adapter->drv_stats;
47209 + struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
47210 + struct be_port_rxf_stats_v0 *port_stats =
47211 + be_port_rxf_stats_from_cmd(adapter);
47212 + struct be_rxf_stats_v0 *rxf_stats =
47213 + be_rxf_stats_from_cmd(adapter);
47214 +
47215 + drvs->rx_pause_frames = port_stats->rx_pause_frames;
47216 + drvs->rx_crc_errors = port_stats->rx_crc_errors;
47217 + drvs->rx_control_frames = port_stats->rx_control_frames;
47218 + drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
47219 + drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
47220 + drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
47221 + drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
47222 + drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
47223 + drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
47224 + drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
47225 + drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
47226 + drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
47227 + drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
47228 + drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
47229 + drvs->rx_input_fifo_overflow_drop =
47230 + port_stats->rx_input_fifo_overflow;
47231 + drvs->rx_dropped_header_too_small =
47232 + port_stats->rx_dropped_header_too_small;
47233 + drvs->rx_address_match_errors =
47234 + port_stats->rx_address_match_errors;
47235 + drvs->rx_alignment_symbol_errors =
47236 + port_stats->rx_alignment_symbol_errors;
47237 +
47238 + drvs->tx_pauseframes = port_stats->tx_pauseframes;
47239 + drvs->tx_controlframes = port_stats->tx_controlframes;
47240 +
47241 + if (adapter->port_num)
47242 + drvs->jabber_events =
47243 + rxf_stats->port1_jabber_events;
47244 + else
47245 + drvs->jabber_events =
47246 + rxf_stats->port0_jabber_events;
47247 + drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
47248 + drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
47249 + drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
47250 + drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
47251 + drvs->forwarded_packets = rxf_stats->forwarded_packets;
47252 + drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
47253 + drvs->rx_drops_no_tpre_descr =
47254 + rxf_stats->rx_drops_no_tpre_descr;
47255 + drvs->rx_drops_too_many_frags =
47256 + rxf_stats->rx_drops_too_many_frags;
47257 + adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
47258 +}
47259 +
47260 +static void populate_be3_stats(struct be_adapter *adapter)
47261 +{
47262 + struct be_drv_stats *drvs = &adapter->drv_stats;
47263 + struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
47264 +
47265 + struct be_rxf_stats_v1 *rxf_stats =
47266 + be_rxf_stats_from_cmd(adapter);
47267 + struct be_port_rxf_stats_v1 *port_stats =
47268 + be_port_rxf_stats_from_cmd(adapter);
47269 +
47270 + drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
47271 + drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
47272 + drvs->rx_pause_frames = port_stats->rx_pause_frames;
47273 + drvs->rx_crc_errors = port_stats->rx_crc_errors;
47274 + drvs->rx_control_frames = port_stats->rx_control_frames;
47275 + drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
47276 + drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
47277 + drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
47278 + drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
47279 + drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
47280 + drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
47281 + drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
47282 + drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
47283 + drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
47284 + drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
47285 + drvs->rx_dropped_header_too_small =
47286 + port_stats->rx_dropped_header_too_small;
47287 + drvs->rx_input_fifo_overflow_drop =
47288 + port_stats->rx_input_fifo_overflow_drop;
47289 + drvs->rx_address_match_errors =
47290 + port_stats->rx_address_match_errors;
47291 + drvs->rx_alignment_symbol_errors =
47292 + port_stats->rx_alignment_symbol_errors;
47293 + drvs->rxpp_fifo_overflow_drop =
47294 + port_stats->rxpp_fifo_overflow_drop;
47295 + drvs->tx_pauseframes = port_stats->tx_pauseframes;
47296 + drvs->tx_controlframes = port_stats->tx_controlframes;
47297 + drvs->jabber_events = port_stats->jabber_events;
47298 + drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
47299 + drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
47300 + drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
47301 + drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
47302 + drvs->forwarded_packets = rxf_stats->forwarded_packets;
47303 + drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
47304 + drvs->rx_drops_no_tpre_descr =
47305 + rxf_stats->rx_drops_no_tpre_descr;
47306 + drvs->rx_drops_too_many_frags =
47307 + rxf_stats->rx_drops_too_many_frags;
47308 + adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
47309 +}
47310 +
47311 +
47312 +static void accumulate_16bit_val(u32 *acc, u16 val)
47313 +{
47314 +#define lo(x) (x & 0xFFFF)
47315 +#define hi(x) (x & 0xFFFF0000)
47316 + bool wrapped = val < lo(*acc);
47317 + u32 newacc = hi(*acc) + val;
47318 +
47319 + if (wrapped)
47320 + newacc += 65536;
47321 + ACCESS_ONCE_RW(*acc) = newacc;
47322 +}
47323 +
47324 +void be_parse_stats(struct be_adapter *adapter)
47325 +{
47326 + struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
47327 + struct be_rx_obj *rxo;
47328 + int i;
47329 +
47330 + if (adapter->generation == BE_GEN3) {
47331 + populate_be3_stats(adapter);
47332 + } else {
47333 + populate_be2_stats(adapter);
47334 + }
47335 +
47336 + /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
47337 + for_all_rx_queues(adapter, rxo, i) {
47338 + /* below erx HW counter can actually wrap around after
47339 + * 65535. Driver accumulates a 32-bit value
47340 + */
47341 + accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
47342 + (u16)erx->rx_drops_no_fragments[rxo->q.id]);
47343 + }
47344 }
47345
47346 void netdev_stats_update(struct be_adapter *adapter)
47347 {
47348 - struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
47349 - struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
47350 - struct be_port_rxf_stats *port_stats =
47351 - &rxf_stats->port[adapter->port_num];
47352 - struct net_device_stats *dev_stats = &adapter->stats.net_stats;
47353 - struct be_erx_stats *erx_stats = &hw_stats->erx;
47354 + struct be_drv_stats *drvs = &adapter->drv_stats;
47355 + struct net_device_stats *dev_stats = &adapter->net_stats;
47356 + struct be_rx_obj *rxo;
47357 + struct be_tx_obj *txo;
47358 + unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
47359 + int i;
47360
47361 - dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
47362 - dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
47363 - dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
47364 - dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
47365 + for_all_rx_queues(adapter, rxo, i) {
47366 + pkts += rx_stats(rxo)->rx_pkts;
47367 + bytes += rx_stats(rxo)->rx_bytes;
47368 + mcast += rx_stats(rxo)->rx_mcast_pkts;
47369 + drops += rx_stats(rxo)->rx_drops_no_frags;
47370 + }
47371 + dev_stats->rx_packets = pkts;
47372 + dev_stats->rx_bytes = bytes;
47373 + dev_stats->multicast = mcast;
47374 + dev_stats->rx_dropped = drops;
47375 +
47376 + pkts = bytes = 0;
47377 + for_all_tx_queues(adapter, txo, i) {
47378 + pkts += tx_stats(txo)->be_tx_pkts;
47379 + bytes += tx_stats(txo)->be_tx_bytes;
47380 + }
47381 + dev_stats->tx_packets = pkts;
47382 + dev_stats->tx_bytes = bytes;
47383
47384 /* bad pkts received */
47385 - dev_stats->rx_errors = port_stats->rx_crc_errors +
47386 - port_stats->rx_alignment_symbol_errors +
47387 - port_stats->rx_in_range_errors +
47388 - port_stats->rx_out_range_errors +
47389 - port_stats->rx_frame_too_long +
47390 - port_stats->rx_dropped_too_small +
47391 - port_stats->rx_dropped_too_short +
47392 - port_stats->rx_dropped_header_too_small +
47393 - port_stats->rx_dropped_tcp_length +
47394 - port_stats->rx_dropped_runt +
47395 - port_stats->rx_tcp_checksum_errs +
47396 - port_stats->rx_ip_checksum_errs +
47397 - port_stats->rx_udp_checksum_errs;
47398 -
47399 - /* no space in linux buffers: best possible approximation */
47400 - dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];
47401 + dev_stats->rx_errors = drvs->rx_crc_errors +
47402 + drvs->rx_alignment_symbol_errors +
47403 + drvs->rx_in_range_errors +
47404 + drvs->rx_out_range_errors +
47405 + drvs->rx_frame_too_long +
47406 + drvs->rx_dropped_too_small +
47407 + drvs->rx_dropped_too_short +
47408 + drvs->rx_dropped_header_too_small +
47409 + drvs->rx_dropped_tcp_length +
47410 + drvs->rx_dropped_runt +
47411 + drvs->rx_tcp_checksum_errs +
47412 + drvs->rx_ip_checksum_errs +
47413 + drvs->rx_udp_checksum_errs;
47414
47415 /* detailed rx errors */
47416 - dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
47417 - port_stats->rx_out_range_errors +
47418 - port_stats->rx_frame_too_long;
47419 + dev_stats->rx_length_errors = drvs->rx_in_range_errors +
47420 + drvs->rx_out_range_errors +
47421 + drvs->rx_frame_too_long;
47422
47423 - /* receive ring buffer overflow */
47424 - dev_stats->rx_over_errors = 0;
47425 -
47426 - dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
47427 + dev_stats->rx_crc_errors = drvs->rx_crc_errors;
47428
47429 /* frame alignment errors */
47430 - dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
47431 + dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
47432
47433 /* receiver fifo overrun */
47434 /* drops_no_pbuf is no per i/f, it's per BE card */
47435 - dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
47436 - port_stats->rx_input_fifo_overflow +
47437 - rxf_stats->rx_drops_no_pbuf;
47438 - /* receiver missed packetd */
47439 - dev_stats->rx_missed_errors = 0;
47440 -
47441 - /* packet transmit problems */
47442 - dev_stats->tx_errors = 0;
47443 -
47444 - /* no space available in linux */
47445 - dev_stats->tx_dropped = 0;
47446 -
47447 - dev_stats->multicast = port_stats->rx_multicast_frames;
47448 - dev_stats->collisions = 0;
47449 -
47450 - /* detailed tx_errors */
47451 - dev_stats->tx_aborted_errors = 0;
47452 - dev_stats->tx_carrier_errors = 0;
47453 - dev_stats->tx_fifo_errors = 0;
47454 - dev_stats->tx_heartbeat_errors = 0;
47455 - dev_stats->tx_window_errors = 0;
47456 + dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
47457 + drvs->rx_input_fifo_overflow_drop +
47458 + drvs->rx_drops_no_pbuf;
47459 }
47460
47461 -void be_link_status_update(struct be_adapter *adapter, bool link_up)
47462 +void be_link_status_update(struct be_adapter *adapter, int link_status)
47463 {
47464 struct net_device *netdev = adapter->netdev;
47465
47466 /* If link came up or went down */
47467 - if (adapter->link_up != link_up) {
47468 - if (link_up) {
47469 + if (adapter->link_status != link_status) {
47470 + adapter->link_speed = -1;
47471 + if (link_status == LINK_UP) {
47472 netif_start_queue(netdev);
47473 netif_carrier_on(netdev);
47474 printk(KERN_INFO "%s: Link up\n", netdev->name);
47475 @@ -222,15 +491,15 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
47476 netif_carrier_off(netdev);
47477 printk(KERN_INFO "%s: Link down\n", netdev->name);
47478 }
47479 - adapter->link_up = link_up;
47480 + adapter->link_status = link_status;
47481 }
47482 }
47483
47484 /* Update the EQ delay n BE based on the RX frags consumed / sec */
47485 -static void be_rx_eqd_update(struct be_adapter *adapter)
47486 +static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
47487 {
47488 - struct be_eq_obj *rx_eq = &adapter->rx_eq;
47489 - struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
47490 + struct be_eq_obj *rx_eq = &rxo->rx_eq;
47491 + struct be_rx_stats *stats = &rxo->stats;
47492 ulong now = jiffies;
47493 u32 eqd;
47494
47495 @@ -247,19 +516,17 @@ static void be_rx_eqd_update(struct be_adapter *adapter)
47496 if ((now - stats->rx_fps_jiffies) < HZ)
47497 return;
47498
47499 - stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
47500 + stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
47501 ((now - stats->rx_fps_jiffies) / HZ);
47502
47503 stats->rx_fps_jiffies = now;
47504 - stats->be_prev_rx_frags = stats->be_rx_frags;
47505 - eqd = stats->be_rx_fps / 110000;
47506 + stats->prev_rx_frags = stats->rx_frags;
47507 + eqd = stats->rx_fps / 110000;
47508 eqd = eqd << 3;
47509 if (eqd > rx_eq->max_eqd)
47510 eqd = rx_eq->max_eqd;
47511 if (eqd < rx_eq->min_eqd)
47512 eqd = rx_eq->min_eqd;
47513 - if (eqd < 10)
47514 - eqd = 0;
47515 if (eqd != rx_eq->cur_eqd)
47516 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
47517
47518 @@ -270,7 +537,7 @@ static struct net_device_stats *be_get_stats(struct net_device *dev)
47519 {
47520 struct be_adapter *adapter = netdev_priv(dev);
47521
47522 - return &adapter->stats.net_stats;
47523 + return &adapter->net_stats;
47524 }
47525
47526 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
47527 @@ -284,9 +551,9 @@ static u32 be_calc_rate(u64 bytes, unsigned long ticks)
47528 return rate;
47529 }
47530
47531 -static void be_tx_rate_update(struct be_adapter *adapter)
47532 +static void be_tx_rate_update(struct be_tx_obj *txo)
47533 {
47534 - struct be_drvr_stats *stats = drvr_stats(adapter);
47535 + struct be_tx_stats *stats = tx_stats(txo);
47536 ulong now = jiffies;
47537
47538 /* Wrapped around? */
47539 @@ -305,10 +572,11 @@ static void be_tx_rate_update(struct be_adapter *adapter)
47540 }
47541 }
47542
47543 -static void be_tx_stats_update(struct be_adapter *adapter,
47544 +static void be_tx_stats_update(struct be_tx_obj *txo,
47545 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
47546 {
47547 - struct be_drvr_stats *stats = drvr_stats(adapter);
47548 + struct be_tx_stats *stats = tx_stats(txo);
47549 +
47550 stats->be_tx_reqs++;
47551 stats->be_tx_wrbs += wrb_cnt;
47552 stats->be_tx_bytes += copied;
47553 @@ -318,7 +586,8 @@ static void be_tx_stats_update(struct be_adapter *adapter,
47554 }
47555
47556 /* Determine number of WRB entries needed to xmit data in an skb */
47557 -static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
47558 +static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
47559 + bool *dummy)
47560 {
47561 int cnt = (skb->len > skb->data_len);
47562
47563 @@ -326,12 +595,13 @@ static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
47564
47565 /* to account for hdr wrb */
47566 cnt++;
47567 - if (cnt & 1) {
47568 + if (lancer_chip(adapter) || !(cnt & 1)) {
47569 + *dummy = false;
47570 + } else {
47571 /* add a dummy to make it an even num */
47572 cnt++;
47573 *dummy = true;
47574 - } else
47575 - *dummy = false;
47576 + }
47577 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
47578 return cnt;
47579 }
47580 @@ -343,17 +613,31 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
47581 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
47582 }
47583
47584 -static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
47585 - bool vlan, u32 wrb_cnt, u32 len)
47586 +static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
47587 + struct sk_buff *skb, u32 wrb_cnt, u32 len)
47588 {
47589 + u16 vlan_tag = 0;
47590 +
47591 memset(hdr, 0, sizeof(*hdr));
47592
47593 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
47594
47595 - if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
47596 + if (skb_is_gso(skb)) {
47597 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
47598 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
47599 hdr, skb_shinfo(skb)->gso_size);
47600 + if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
47601 + AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
47602 +
47603 + if (lancer_A0_chip(adapter)) {
47604 + AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
47605 + if (is_tcp_pkt(skb))
47606 + AMAP_SET_BITS(struct amap_eth_hdr_wrb,
47607 + tcpcs, hdr, 1);
47608 + else if (is_udp_pkt(skb))
47609 + AMAP_SET_BITS(struct amap_eth_hdr_wrb,
47610 + udpcs, hdr, 1);
47611 + }
47612 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
47613 if (is_tcp_pkt(skb))
47614 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
47615 @@ -361,10 +645,10 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
47616 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
47617 }
47618
47619 - if (vlan && vlan_tx_tag_present(skb)) {
47620 + if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
47621 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
47622 - AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
47623 - hdr, vlan_tx_tag_get(skb));
47624 + vlan_tag = be_get_tx_vlan_tag(adapter, skb);
47625 + AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
47626 }
47627
47628 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
47629 @@ -374,14 +658,13 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
47630 }
47631
47632
47633 -static int make_tx_wrbs(struct be_adapter *adapter,
47634 +static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
47635 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
47636 {
47637 - u64 busaddr;
47638 - u32 i, copied = 0;
47639 + dma_addr_t busaddr;
47640 + int i, copied = 0;
47641 struct pci_dev *pdev = adapter->pdev;
47642 struct sk_buff *first_skb = skb;
47643 - struct be_queue_info *txq = &adapter->tx_obj.q;
47644 struct be_eth_wrb *wrb;
47645 struct be_eth_hdr_wrb *hdr;
47646
47647 @@ -389,15 +672,11 @@ static int make_tx_wrbs(struct be_adapter *adapter,
47648 atomic_add(wrb_cnt, &txq->used);
47649 queue_head_inc(txq);
47650
47651 - if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
47652 - dev_err(&pdev->dev, "TX DMA mapping failed\n");
47653 - return 0;
47654 - }
47655 -
47656 if (skb->len > skb->data_len) {
47657 - int len = skb->len - skb->data_len;
47658 + int len = skb_headlen(skb);
47659 + busaddr = pci_map_single(pdev, skb->data, len,
47660 + PCI_DMA_TODEVICE);
47661 wrb = queue_head_node(txq);
47662 - busaddr = skb_shinfo(skb)->dma_head;
47663 wrb_fill(wrb, busaddr, len);
47664 be_dws_cpu_to_le(wrb, sizeof(*wrb));
47665 queue_head_inc(txq);
47666 @@ -407,8 +686,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
47667 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
47668 struct skb_frag_struct *frag =
47669 &skb_shinfo(skb)->frags[i];
47670 -
47671 - busaddr = skb_shinfo(skb)->dma_maps[i];
47672 + busaddr = pci_map_page(pdev, frag->page,
47673 + frag->page_offset,
47674 + frag->size, PCI_DMA_TODEVICE);
47675 wrb = queue_head_node(txq);
47676 wrb_fill(wrb, busaddr, frag->size);
47677 be_dws_cpu_to_le(wrb, sizeof(*wrb));
47678 @@ -423,8 +703,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
47679 queue_head_inc(txq);
47680 }
47681
47682 - wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
47683 - wrb_cnt, copied);
47684 + wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
47685 be_dws_cpu_to_le(hdr, sizeof(*hdr));
47686
47687 return copied;
47688 @@ -434,19 +713,70 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
47689 struct net_device *netdev)
47690 {
47691 struct be_adapter *adapter = netdev_priv(netdev);
47692 - struct be_tx_obj *tx_obj = &adapter->tx_obj;
47693 - struct be_queue_info *txq = &tx_obj->q;
47694 + struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
47695 + struct be_queue_info *txq = &txo->q;
47696 u32 wrb_cnt = 0, copied = 0;
47697 u32 start = txq->head;
47698 bool dummy_wrb, stopped = false;
47699
47700 - wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
47701 + if (unlikely((skb_shinfo(skb)->gso_segs > 1) &&
47702 + skb_shinfo(skb)->gso_size && is_ipv6_ext_hdr(skb))) {
47703 + tx_stats(txo)->be_ipv6_ext_hdr_tx_drop++;
47704 + goto tx_drop;
47705 + }
47706
47707 - copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
47708 + /* If the skb is a large pkt forwarded to this interface
47709 + * after being LRO'd on another interface, drop the pkt.
47710 + * HW cannot handle such pkts. LRO must be disabled when
47711 + * using the server as a router.
47712 + */
47713 + if (!skb_is_gso(skb)) {
47714 + int eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
47715 + VLAN_ETH_HLEN : ETH_HLEN;
47716 +
47717 + if ((skb->len - eth_hdr_len) > adapter->netdev->mtu)
47718 + goto tx_drop;
47719 + }
47720 +
47721 + /* The ASIC is calculating checksum for Vlan tagged pkts
47722 + * though CSO is disabled.
47723 + * To work around this, insert the Vlan tag in the driver
47724 + * and donot set the vlan bit, cso bit in the Tx WRB.
47725 + */
47726 + if (unlikely(vlan_tx_tag_present(skb) &&
47727 + ((skb->ip_summed != CHECKSUM_PARTIAL) || (skb->len <= 60)))) {
47728 + /* Bug 28694: Don't embed the host VLAN tag in SKB
47729 + * when UMC mode enabled on that interface
47730 + */
47731 + if (!(adapter->function_mode & UMC_ENABLED)) {
47732 + skb = skb_share_check(skb, GFP_ATOMIC);
47733 + if (unlikely(!skb))
47734 + goto tx_drop;
47735 +
47736 + skb = be_vlan_put_tag(skb,
47737 + be_get_tx_vlan_tag(adapter, skb));
47738 + if (unlikely(!skb))
47739 + goto tx_drop;
47740 +
47741 + be_reset_skb_tx_vlan(skb);
47742 + }
47743 + }
47744 +
47745 + /* Bug 12422: the stack can send us skbs with length more than 65535
47746 + * BE cannot handle such requests. Hack the extra data out and drop it.
47747 + */
47748 + if (skb->len > 65535) {
47749 + int err = __pskb_trim(skb, 65535);
47750 + BUG_ON(err);
47751 + }
47752 +
47753 + wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
47754 +
47755 + copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
47756 if (copied) {
47757 /* record the sent skb in the sent_skb table */
47758 - BUG_ON(tx_obj->sent_skb_list[start]);
47759 - tx_obj->sent_skb_list[start] = skb;
47760 + BUG_ON(txo->sent_skb_list[start]);
47761 + txo->sent_skb_list[start] = skb;
47762
47763 /* Ensure txq has space for the next skb; Else stop the queue
47764 * *BEFORE* ringing the tx doorbell, so that we serialze the
47765 @@ -454,16 +784,21 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
47766 */
47767 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
47768 txq->len) {
47769 - netif_stop_queue(netdev);
47770 + netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
47771 stopped = true;
47772 }
47773
47774 be_txq_notify(adapter, txq->id, wrb_cnt);
47775
47776 - be_tx_stats_update(adapter, wrb_cnt, copied,
47777 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
47778 + netdev->trans_start = jiffies;
47779 +#endif
47780 +
47781 + be_tx_stats_update(txo, wrb_cnt, copied,
47782 skb_shinfo(skb)->gso_segs, stopped);
47783 } else {
47784 txq->head = start;
47785 +tx_drop:
47786 dev_kfree_skb_any(skb);
47787 }
47788 return NETDEV_TX_OK;
47789 @@ -473,10 +808,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
47790 {
47791 struct be_adapter *adapter = netdev_priv(netdev);
47792 if (new_mtu < BE_MIN_MTU ||
47793 - new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
47794 + new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
47795 + (ETH_HLEN + ETH_FCS_LEN))) {
47796 dev_info(&adapter->pdev->dev,
47797 "MTU must be between %d and %d bytes\n",
47798 - BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
47799 + BE_MIN_MTU,
47800 + (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
47801 return -EINVAL;
47802 }
47803 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
47804 @@ -486,17 +823,19 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
47805 }
47806
47807 /*
47808 - * if there are BE_NUM_VLANS_SUPPORTED or lesser number of VLANS configured,
47809 - * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
47810 - * set the BE in promiscuous VLAN mode.
47811 + * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
47812 + * If the user configures more, place BE in vlan promiscuous mode.
47813 */
47814 -static int be_vid_config(struct be_adapter *adapter)
47815 +static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
47816 {
47817 u16 vtag[BE_NUM_VLANS_SUPPORTED];
47818 u16 ntags = 0, i;
47819 - int status;
47820 + int status = 0;
47821
47822 - if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
47823 + /* No need to change the VLAN state if the I/F is in promiscous */
47824 + if (adapter->promiscuous)
47825 + return 0;
47826 + if (adapter->vlans_added <= adapter->max_vlans) {
47827 /* Construct VLAN Table to give to HW */
47828 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
47829 if (adapter->vlan_tag[i]) {
47830 @@ -504,47 +843,46 @@ static int be_vid_config(struct be_adapter *adapter)
47831 ntags++;
47832 }
47833 }
47834 - status = be_cmd_vlan_config(adapter, adapter->if_handle,
47835 - vtag, ntags, 1, 0);
47836 + /* Send command only if there is something to be programmed */
47837 + if (ntags)
47838 + status = be_cmd_vlan_config(adapter, adapter->if_handle,
47839 + vtag, ntags, 1, 0);
47840 } else {
47841 status = be_cmd_vlan_config(adapter, adapter->if_handle,
47842 - NULL, 0, 1, 1);
47843 + NULL, 0, 1, 1);
47844 }
47845 +
47846 return status;
47847 }
47848
47849 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
47850 {
47851 struct be_adapter *adapter = netdev_priv(netdev);
47852 - struct be_eq_obj *rx_eq = &adapter->rx_eq;
47853 - struct be_eq_obj *tx_eq = &adapter->tx_eq;
47854
47855 - be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
47856 - be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
47857 adapter->vlan_grp = grp;
47858 - be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
47859 - be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
47860 }
47861
47862 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
47863 {
47864 struct be_adapter *adapter = netdev_priv(netdev);
47865
47866 - adapter->num_vlans++;
47867 + adapter->vlans_added++;
47868 +
47869 adapter->vlan_tag[vid] = 1;
47870 -
47871 - be_vid_config(adapter);
47872 + if (adapter->vlans_added <= (adapter->max_vlans + 1))
47873 + be_vid_config(adapter, false, 0);
47874 }
47875
47876 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
47877 {
47878 struct be_adapter *adapter = netdev_priv(netdev);
47879
47880 - adapter->num_vlans--;
47881 - adapter->vlan_tag[vid] = 0;
47882 -
47883 + adapter->vlans_added--;
47884 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
47885 - be_vid_config(adapter);
47886 +
47887 + adapter->vlan_tag[vid] = 0;
47888 + if (adapter->vlans_added <= adapter->max_vlans)
47889 + be_vid_config(adapter, false, 0);
47890 }
47891
47892 static void be_set_multicast_list(struct net_device *netdev)
47893 @@ -552,7 +890,7 @@ static void be_set_multicast_list(struct net_device *netdev)
47894 struct be_adapter *adapter = netdev_priv(netdev);
47895
47896 if (netdev->flags & IFF_PROMISC) {
47897 - be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
47898 + be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
47899 adapter->promiscuous = true;
47900 goto done;
47901 }
47902 @@ -560,81 +898,244 @@ static void be_set_multicast_list(struct net_device *netdev)
47903 /* BE was previously in promiscous mode; disable it */
47904 if (adapter->promiscuous) {
47905 adapter->promiscuous = false;
47906 - be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
47907 + be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
47908 +
47909 + if (adapter->vlans_added)
47910 + be_vid_config(adapter, false, 0);
47911 }
47912
47913 - if (netdev->flags & IFF_ALLMULTI) {
47914 - be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0);
47915 + /* Enable multicast promisc if num configured exceeds what we support */
47916 + if (netdev->flags & IFF_ALLMULTI ||
47917 + netdev_mc_count(netdev) > BE_MAX_MC) {
47918 + be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
47919 goto done;
47920 }
47921
47922 - be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
47923 - netdev->mc_count);
47924 + be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
47925 done:
47926 return;
47927 }
47928
47929 -static void be_rx_rate_update(struct be_adapter *adapter)
47930 +#ifdef HAVE_SRIOV_CONFIG
47931 +static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
47932 {
47933 - struct be_drvr_stats *stats = drvr_stats(adapter);
47934 + struct be_adapter *adapter = netdev_priv(netdev);
47935 + int status;
47936 +
47937 + if (adapter->num_vfs == 0)
47938 + return -EPERM;
47939 +
47940 + if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
47941 + return -EINVAL;
47942 +
47943 + if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
47944 + status = be_cmd_pmac_del(adapter,
47945 + adapter->vf_cfg[vf].vf_if_handle,
47946 + adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
47947 +
47948 + status = be_cmd_pmac_add(adapter, mac,
47949 + adapter->vf_cfg[vf].vf_if_handle,
47950 + &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
47951 +
47952 + if (status)
47953 + dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
47954 + mac, vf);
47955 + else
47956 + memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
47957 +
47958 + return status;
47959 +}
47960 +
47961 +static int be_get_vf_config(struct net_device *netdev, int vf,
47962 + struct ifla_vf_info *vi)
47963 +{
47964 + struct be_adapter *adapter = netdev_priv(netdev);
47965 +
47966 + if (adapter->num_vfs == 0)
47967 + return -EPERM;
47968 +
47969 + if (vf >= adapter->num_vfs)
47970 + return -EINVAL;
47971 +
47972 + vi->vf = vf;
47973 + vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
47974 + vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag & VLAN_VID_MASK;
47975 + vi->qos = adapter->vf_cfg[vf].vf_vlan_tag >> VLAN_PRIO_SHIFT;
47976 + memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
47977 +
47978 + return 0;
47979 +}
47980 +
47981 +/*
47982 + * Entry point to configure vlan behavior for a VF.
47983 + * 1. By default a VF is vlan Challenged.
47984 + * 2. It may or may not have Transparent Tagging enabled.
47985 + * 3. Vlan privilege for a VF can be toggled using special VID 4095.
47986 + * 4. When removing the Vlan privilege for a VF there is no need set default vid
47987 + * 5. Transparent Tagging configured for a VF resets its Vlan privilege
47988 + * 6. To disable the current Transparet Tagging for a VF:
47989 + * 6a. run the last iproute command with vlan set to 0.
47990 + * 6b. programing the default vid will disable Transparent Tagging in ARM/ASIC
47991 + */
47992 +static int be_set_vf_vlan(struct net_device *netdev,
47993 + int vf, u16 vlan, u8 qos)
47994 +{
47995 + struct be_adapter *adapter = netdev_priv(netdev);
47996 + int status = 0;
47997 + u32 en = 0;
47998 +
47999 + if (adapter->num_vfs == 0)
48000 + return -EPERM;
48001 +
48002 + if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
48003 + return -EINVAL;
48004 +
48005 + status = be_cmd_get_fn_privileges(adapter, &en, vf + 1);
48006 + if (status)
48007 + goto sts;
48008 +
48009 + if (vlan == 4095) {
48010 + if (en & BE_PRIV_FILTMGMT) {
48011 + /* Knock off filtering privileges */
48012 + en &= ~BE_PRIV_FILTMGMT;
48013 + } else {
48014 + en |= BE_PRIV_FILTMGMT;
48015 + /* Transparent Tagging is currently enabled, Reset it */
48016 + if (adapter->vf_cfg[vf].vf_vlan_tag) {
48017 + adapter->vf_cfg[vf].vf_vlan_tag = 0;
48018 + vlan = adapter->vf_cfg[vf].vf_def_vid;
48019 + be_cmd_set_hsw_config(adapter, vlan, vf + 1,
48020 + adapter->vf_cfg[vf].vf_if_handle);
48021 + }
48022 + }
48023 +
48024 + adapter->vf_cfg[vf].vf_vlan_tag = 0;
48025 + status = be_cmd_set_fn_privileges(adapter, en, NULL, vf + 1);
48026 +
48027 + goto sts;
48028 + }
48029 +
48030 + if (vlan || qos) {
48031 + if (en & BE_PRIV_FILTMGMT) {
48032 + /* Check privilege and reset it to default */
48033 + en &= ~BE_PRIV_FILTMGMT;
48034 + be_cmd_set_fn_privileges(adapter, en, NULL, vf + 1);
48035 + }
48036 +
48037 + vlan |= qos << VLAN_PRIO_SHIFT;
48038 + if (adapter->vf_cfg[vf].vf_vlan_tag != vlan) {
48039 + /* If this is new value, program it. Else skip. */
48040 + adapter->vf_cfg[vf].vf_vlan_tag = vlan;
48041 +
48042 + status = be_cmd_set_hsw_config(adapter, vlan,
48043 + vf + 1, adapter->vf_cfg[vf].vf_if_handle);
48044 + }
48045 +
48046 + } else {
48047 + /* Reset Transparent Vlan Tagging. */
48048 + adapter->vf_cfg[vf].vf_vlan_tag = 0;
48049 + vlan = adapter->vf_cfg[vf].vf_def_vid;
48050 + status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
48051 + adapter->vf_cfg[vf].vf_if_handle);
48052 + }
48053 +
48054 +sts:
48055 + if (status)
48056 + dev_info(&adapter->pdev->dev,
48057 + "VLAN %d config on VF %d failed\n", vlan, vf);
48058 + return status;
48059 +}
48060 +
48061 +static int be_set_vf_tx_rate(struct net_device *netdev,
48062 + int vf, int rate)
48063 +{
48064 + struct be_adapter *adapter = netdev_priv(netdev);
48065 + int status = 0;
48066 +
48067 + if (adapter->num_vfs == 0)
48068 + return -EPERM;
48069 +
48070 + if ((vf >= adapter->num_vfs) || (rate < 0))
48071 + return -EINVAL;
48072 +
48073 + if (rate > 10000)
48074 + rate = 10000;
48075 +
48076 + adapter->vf_cfg[vf].vf_tx_rate = rate;
48077 + status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
48078 +
48079 + if (status)
48080 + dev_info(&adapter->pdev->dev,
48081 + "tx rate %d on VF %d failed\n", rate, vf);
48082 + return status;
48083 +}
48084 +#endif /* HAVE_SRIOV_CONFIG */
48085 +
48086 +static void be_rx_rate_update(struct be_rx_obj *rxo)
48087 +{
48088 + struct be_rx_stats *stats = &rxo->stats;
48089 ulong now = jiffies;
48090
48091 /* Wrapped around */
48092 - if (time_before(now, stats->be_rx_jiffies)) {
48093 - stats->be_rx_jiffies = now;
48094 + if (time_before(now, stats->rx_jiffies)) {
48095 + stats->rx_jiffies = now;
48096 return;
48097 }
48098
48099 /* Update the rate once in two seconds */
48100 - if ((now - stats->be_rx_jiffies) < 2 * HZ)
48101 + if ((now - stats->rx_jiffies) < 2 * HZ)
48102 return;
48103
48104 - stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
48105 - - stats->be_rx_bytes_prev,
48106 - now - stats->be_rx_jiffies);
48107 - stats->be_rx_jiffies = now;
48108 - stats->be_rx_bytes_prev = stats->be_rx_bytes;
48109 + stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
48110 + now - stats->rx_jiffies);
48111 + stats->rx_jiffies = now;
48112 + stats->rx_bytes_prev = stats->rx_bytes;
48113 }
48114
48115 -static void be_rx_stats_update(struct be_adapter *adapter,
48116 - u32 pktsize, u16 numfrags)
48117 +static void be_rx_stats_update(struct be_rx_obj *rxo,
48118 + struct be_rx_compl_info *rxcp)
48119 {
48120 - struct be_drvr_stats *stats = drvr_stats(adapter);
48121 + struct be_rx_stats *stats = &rxo->stats;
48122
48123 - stats->be_rx_compl++;
48124 - stats->be_rx_frags += numfrags;
48125 - stats->be_rx_bytes += pktsize;
48126 - stats->be_rx_pkts++;
48127 + stats->rx_compl++;
48128 + stats->rx_frags += rxcp->num_rcvd;
48129 + stats->rx_bytes += rxcp->pkt_size;
48130 + stats->rx_pkts++;
48131 + if (rxcp->pkt_type == BE_MULTICAST_PACKET)
48132 + stats->rx_mcast_pkts++;
48133 + if (rxcp->err)
48134 + stats->rxcp_err++;
48135 }
48136
48137 -static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
48138 +static inline bool csum_passed(struct be_rx_compl_info *rxcp)
48139 {
48140 - u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
48141 -
48142 - l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
48143 - ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
48144 - ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
48145 - if (ip_version) {
48146 - tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
48147 - udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
48148 - }
48149 - ipv6_chk = (ip_version && (tcpf || udpf));
48150 -
48151 - return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
48152 + /* L4 checksum is not reliable for non TCP/UDP packets.
48153 + * Also ignore ipcksm for ipv6 pkts */
48154 + return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
48155 + (rxcp->ip_csum || rxcp->ipv6);
48156 }
48157
48158 static struct be_rx_page_info *
48159 -get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
48160 +get_rx_page_info(struct be_adapter *adapter, struct be_rx_obj *rxo,
48161 + u16 frag_idx)
48162 {
48163 struct be_rx_page_info *rx_page_info;
48164 - struct be_queue_info *rxq = &adapter->rx_obj.q;
48165 + struct be_queue_info *rxq = &rxo->q;
48166
48167 - rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
48168 - BUG_ON(!rx_page_info->page);
48169 + rx_page_info = &rxo->page_info_tbl[frag_idx];
48170 + if (!rx_page_info->page) {
48171 + printk(KERN_EMERG "curr_idx=%d prev_dix=%d rxq->head=%d\n",
48172 + frag_idx, rxo->prev_frag_idx, rxq->head);
48173 + BUG_ON(!rx_page_info->page);
48174 + }
48175
48176 - if (rx_page_info->last_page_user)
48177 + if (rx_page_info->last_page_user) {
48178 pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
48179 adapter->big_page_size, PCI_DMA_FROMDEVICE);
48180 + rx_page_info->last_page_user = false;
48181 + }
48182 +
48183 + rxo->prev_frag_idx = frag_idx;
48184
48185 atomic_dec(&rxq->used);
48186 return rx_page_info;
48187 @@ -642,20 +1143,26 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
48188
48189 /* Throwaway the data in the Rx completion */
48190 static void be_rx_compl_discard(struct be_adapter *adapter,
48191 - struct be_eth_rx_compl *rxcp)
48192 + struct be_rx_obj *rxo,
48193 + struct be_rx_compl_info *rxcp)
48194 {
48195 - struct be_queue_info *rxq = &adapter->rx_obj.q;
48196 + struct be_queue_info *rxq = &rxo->q;
48197 struct be_rx_page_info *page_info;
48198 - u16 rxq_idx, i, num_rcvd;
48199 + u16 i;
48200 + bool oob_error;
48201 + u16 num_rcvd = rxcp->num_rcvd;
48202
48203 - rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
48204 - num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
48205 + oob_error = lancer_A0_chip(adapter) && rxcp->err;
48206 +
48207 + /* In case of OOB error num_rcvd will be 1 more than actual */
48208 + if (oob_error && num_rcvd)
48209 + num_rcvd -= 1;
48210
48211 for (i = 0; i < num_rcvd; i++) {
48212 - page_info = get_rx_page_info(adapter, rxq_idx);
48213 + page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48214 put_page(page_info->page);
48215 memset(page_info, 0, sizeof(*page_info));
48216 - index_inc(&rxq_idx, rxq->len);
48217 + index_inc(&rxcp->rxq_idx, rxq->len);
48218 }
48219 }
48220
48221 @@ -663,29 +1170,24 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
48222 * skb_fill_rx_data forms a complete skb for an ether frame
48223 * indicated by rxcp.
48224 */
48225 -static void skb_fill_rx_data(struct be_adapter *adapter,
48226 - struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
48227 +static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
48228 + struct sk_buff *skb, struct be_rx_compl_info *rxcp)
48229 {
48230 - struct be_queue_info *rxq = &adapter->rx_obj.q;
48231 + struct be_queue_info *rxq = &rxo->q;
48232 struct be_rx_page_info *page_info;
48233 - u16 rxq_idx, i, num_rcvd, j;
48234 - u32 pktsize, hdr_len, curr_frag_len, size;
48235 + u16 i, j;
48236 + u16 hdr_len, curr_frag_len, remaining;
48237 u8 *start;
48238
48239 - rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
48240 - pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
48241 - num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
48242 -
48243 - page_info = get_rx_page_info(adapter, rxq_idx);
48244 -
48245 + page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48246 start = page_address(page_info->page) + page_info->page_offset;
48247 prefetch(start);
48248
48249 /* Copy data in the first descriptor of this completion */
48250 - curr_frag_len = min(pktsize, rx_frag_size);
48251 + curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
48252
48253 /* Copy the header portion into skb_data */
48254 - hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
48255 + hdr_len = min(BE_HDR_LEN, curr_frag_len);
48256 memcpy(skb->data, start, hdr_len);
48257 skb->len = curr_frag_len;
48258 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
48259 @@ -702,21 +1204,19 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
48260 skb->data_len = curr_frag_len - hdr_len;
48261 skb->tail += hdr_len;
48262 }
48263 - memset(page_info, 0, sizeof(*page_info));
48264 + page_info->page = NULL;
48265
48266 - if (pktsize <= rx_frag_size) {
48267 - BUG_ON(num_rcvd != 1);
48268 - goto done;
48269 + if (rxcp->pkt_size <= rx_frag_size) {
48270 + BUG_ON(rxcp->num_rcvd != 1);
48271 + return;
48272 }
48273
48274 /* More frags present for this completion */
48275 - size = pktsize;
48276 - for (i = 1, j = 0; i < num_rcvd; i++) {
48277 - size -= curr_frag_len;
48278 - index_inc(&rxq_idx, rxq->len);
48279 - page_info = get_rx_page_info(adapter, rxq_idx);
48280 -
48281 - curr_frag_len = min(size, rx_frag_size);
48282 + index_inc(&rxcp->rxq_idx, rxq->len);
48283 + remaining = rxcp->pkt_size - curr_frag_len;
48284 + for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
48285 + page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48286 + curr_frag_len = min(remaining, rx_frag_size);
48287
48288 /* Coalesce all frags from the same physical page in one slot */
48289 if (page_info->page_offset == 0) {
48290 @@ -735,99 +1235,122 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
48291 skb->len += curr_frag_len;
48292 skb->data_len += curr_frag_len;
48293
48294 - memset(page_info, 0, sizeof(*page_info));
48295 + remaining -= curr_frag_len;
48296 + index_inc(&rxcp->rxq_idx, rxq->len);
48297 + page_info->page = NULL;
48298 }
48299 BUG_ON(j > MAX_SKB_FRAGS);
48300 -
48301 -done:
48302 - be_rx_stats_update(adapter, pktsize, num_rcvd);
48303 - return;
48304 }
48305
48306 -/* Process the RX completion indicated by rxcp when GRO is disabled */
48307 +/* Process the RX completion indicated by rxcp when LRO is disabled */
48308 static void be_rx_compl_process(struct be_adapter *adapter,
48309 - struct be_eth_rx_compl *rxcp)
48310 + struct be_rx_obj *rxo,
48311 + struct be_rx_compl_info *rxcp)
48312 {
48313 struct sk_buff *skb;
48314 - u32 vlanf, vid;
48315 - u8 vtm;
48316
48317 - vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
48318 - vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
48319 -
48320 - /* vlanf could be wrongly set in some cards.
48321 - * ignore if vtm is not set */
48322 - if ((adapter->cap == 0x400) && !vtm)
48323 - vlanf = 0;
48324 -
48325 - skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
48326 - if (!skb) {
48327 + skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
48328 + if (unlikely(!skb)) {
48329 if (net_ratelimit())
48330 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
48331 - be_rx_compl_discard(adapter, rxcp);
48332 + be_rx_compl_discard(adapter, rxo, rxcp);
48333 return;
48334 }
48335
48336 - skb_reserve(skb, NET_IP_ALIGN);
48337 + skb_fill_rx_data(adapter, rxo, skb, rxcp);
48338
48339 - skb_fill_rx_data(adapter, skb, rxcp);
48340 -
48341 - if (do_pkt_csum(rxcp, adapter->rx_csum))
48342 - skb->ip_summed = CHECKSUM_NONE;
48343 - else
48344 + if (likely(adapter->rx_csum && csum_passed(rxcp)))
48345 skb->ip_summed = CHECKSUM_UNNECESSARY;
48346 + else
48347 + skb->ip_summed = CHECKSUM_NONE;
48348
48349 skb->truesize = skb->len + sizeof(struct sk_buff);
48350 + if (unlikely(rxcp->vlanf) &&
48351 + unlikely(!vlan_configured(adapter))) {
48352 + __vlan_put_tag(skb, rxcp->vlan_tag);
48353 + }
48354 skb->protocol = eth_type_trans(skb, adapter->netdev);
48355 skb->dev = adapter->netdev;
48356
48357 - if (vlanf) {
48358 - if (!adapter->vlan_grp || adapter->num_vlans == 0) {
48359 - kfree_skb(skb);
48360 - return;
48361 - }
48362 - vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
48363 - vid = be16_to_cpu(vid);
48364 - vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
48365 - } else {
48366 + if (unlikely(rxcp->vlanf) &&
48367 + vlan_configured(adapter))
48368 + vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
48369 + rxcp->vlan_tag);
48370 + else
48371 netif_receive_skb(skb);
48372 +
48373 + return;
48374 +}
48375 +
48376 +/* Process the RX completion indicated by rxcp when LRO is enabled */
48377 +static void be_rx_compl_process_lro(struct be_adapter *adapter,
48378 + struct be_rx_obj *rxo,
48379 + struct be_rx_compl_info *rxcp)
48380 +{
48381 + struct be_rx_page_info *page_info;
48382 + struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
48383 + struct be_queue_info *rxq = &rxo->q;
48384 + u16 remaining, curr_frag_len;
48385 + u16 i, j;
48386 +
48387 + remaining = rxcp->pkt_size;
48388 + for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
48389 + page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48390 +
48391 + curr_frag_len = min(remaining, rx_frag_size);
48392 +
48393 + /* Coalesce all frags from the same physical page in one slot */
48394 + if (i == 0 || page_info->page_offset == 0) {
48395 + /* First frag or Fresh page */
48396 + j++;
48397 + rx_frags[j].page = page_info->page;
48398 + rx_frags[j].page_offset = page_info->page_offset;
48399 + rx_frags[j].size = 0;
48400 + } else {
48401 + put_page(page_info->page);
48402 + }
48403 + rx_frags[j].size += curr_frag_len;
48404 +
48405 + remaining -= curr_frag_len;
48406 + index_inc(&rxcp->rxq_idx, rxq->len);
48407 + memset(page_info, 0, sizeof(*page_info));
48408 + }
48409 + BUG_ON(j > MAX_SKB_FRAGS);
48410 +
48411 + if (likely(!rxcp->vlanf)) {
48412 + lro_receive_frags(&rxo->lro_mgr, rx_frags, rxcp->pkt_size,
48413 + rxcp->pkt_size, NULL, 0);
48414 + } else {
48415 + lro_vlan_hwaccel_receive_frags(&rxo->lro_mgr, rx_frags,
48416 + rxcp->pkt_size, rxcp->pkt_size, adapter->vlan_grp,
48417 + rxcp->vlan_tag, NULL, 0);
48418 }
48419
48420 return;
48421 }
48422
48423 /* Process the RX completion indicated by rxcp when GRO is enabled */
48424 -static void be_rx_compl_process_gro(struct be_adapter *adapter,
48425 - struct be_eth_rx_compl *rxcp)
48426 +void be_rx_compl_process_gro(struct be_adapter *adapter,
48427 + struct be_rx_obj *rxo,
48428 + struct be_rx_compl_info *rxcp)
48429 {
48430 +#ifdef NETIF_F_GRO
48431 struct be_rx_page_info *page_info;
48432 struct sk_buff *skb = NULL;
48433 - struct be_queue_info *rxq = &adapter->rx_obj.q;
48434 - struct be_eq_obj *eq_obj = &adapter->rx_eq;
48435 - u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
48436 - u16 i, rxq_idx = 0, vid, j;
48437 - u8 vtm;
48438 -
48439 - num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
48440 - pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
48441 - vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
48442 - rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
48443 - vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
48444 -
48445 - /* vlanf could be wrongly set in some cards.
48446 - * ignore if vtm is not set */
48447 - if ((adapter->cap == 0x400) && !vtm)
48448 - vlanf = 0;
48449 + struct be_queue_info *rxq = &rxo->q;
48450 + struct be_eq_obj *eq_obj = &rxo->rx_eq;
48451 + u16 remaining, curr_frag_len;
48452 + u16 i, j;
48453
48454 skb = napi_get_frags(&eq_obj->napi);
48455 if (!skb) {
48456 - be_rx_compl_discard(adapter, rxcp);
48457 + be_rx_compl_discard(adapter, rxo, rxcp);
48458 return;
48459 }
48460
48461 - remaining = pkt_size;
48462 - for (i = 0, j = -1; i < num_rcvd; i++) {
48463 - page_info = get_rx_page_info(adapter, rxq_idx);
48464 + remaining = rxcp->pkt_size;
48465 + for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
48466 + page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48467
48468 curr_frag_len = min(remaining, rx_frag_size);
48469
48470 @@ -845,55 +1368,129 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
48471 skb_shinfo(skb)->frags[j].size += curr_frag_len;
48472
48473 remaining -= curr_frag_len;
48474 - index_inc(&rxq_idx, rxq->len);
48475 + index_inc(&rxcp->rxq_idx, rxq->len);
48476 memset(page_info, 0, sizeof(*page_info));
48477 }
48478 BUG_ON(j > MAX_SKB_FRAGS);
48479
48480 skb_shinfo(skb)->nr_frags = j + 1;
48481 - skb->len = pkt_size;
48482 - skb->data_len = pkt_size;
48483 - skb->truesize += pkt_size;
48484 + skb->len = rxcp->pkt_size;
48485 + skb->data_len = rxcp->pkt_size;
48486 + skb->truesize += rxcp->pkt_size;
48487 skb->ip_summed = CHECKSUM_UNNECESSARY;
48488
48489 - if (likely(!vlanf)) {
48490 + if (likely(!rxcp->vlanf))
48491 napi_gro_frags(&eq_obj->napi);
48492 - } else {
48493 - vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
48494 - vid = be16_to_cpu(vid);
48495 + else
48496 + vlan_gro_frags(&eq_obj->napi,
48497 + adapter->vlan_grp, rxcp->vlan_tag);
48498 +#endif
48499
48500 - if (!adapter->vlan_grp || adapter->num_vlans == 0)
48501 - return;
48502 -
48503 - vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
48504 - }
48505 -
48506 - be_rx_stats_update(adapter, pkt_size, num_rcvd);
48507 return;
48508 }
48509
48510 -static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
48511 +static void be_parse_rx_compl_v1(struct be_adapter *adapter,
48512 + struct be_eth_rx_compl *compl,
48513 + struct be_rx_compl_info *rxcp)
48514 {
48515 - struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);
48516 + rxcp->pkt_size =
48517 + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
48518 + rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
48519 + rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
48520 + rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
48521 + rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
48522 + rxcp->ip_csum =
48523 + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
48524 + rxcp->l4_csum =
48525 + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
48526 + rxcp->ipv6 =
48527 + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
48528 + rxcp->rxq_idx =
48529 + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
48530 + rxcp->num_rcvd =
48531 + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
48532 + rxcp->pkt_type =
48533 + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
48534 + if (rxcp->vlanf) {
48535 + rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
48536 + compl);
48537 + rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
48538 + vlan_tag, compl);
48539 + }
48540 + rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
48541 +}
48542
48543 - if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
48544 +static void be_parse_rx_compl_v0(struct be_adapter *adapter,
48545 + struct be_eth_rx_compl *compl,
48546 + struct be_rx_compl_info *rxcp)
48547 +{
48548 + rxcp->pkt_size =
48549 + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
48550 + rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
48551 + rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
48552 + rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
48553 + rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
48554 + rxcp->ip_csum =
48555 + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
48556 + rxcp->l4_csum =
48557 + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
48558 + rxcp->ipv6 =
48559 + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
48560 + rxcp->rxq_idx =
48561 + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
48562 + rxcp->num_rcvd =
48563 + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
48564 + rxcp->pkt_type =
48565 + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
48566 + if (rxcp->vlanf) {
48567 + rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
48568 + compl);
48569 + rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
48570 + vlan_tag, compl);
48571 + }
48572 + rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
48573 +}
48574 +
48575 +static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
48576 +{
48577 + struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
48578 + struct be_rx_compl_info *rxcp = &rxo->rxcp;
48579 + struct be_adapter *adapter = rxo->adapter;
48580 +
48581 + /* For checking the valid bit it is Ok to use either definition as the
48582 + * valid bit is at the same position in both v0 and v1 Rx compl */
48583 + if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
48584 return NULL;
48585
48586 - be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
48587 + rmb();
48588 + be_dws_le_to_cpu(compl, sizeof(*compl));
48589
48590 - queue_tail_inc(&adapter->rx_obj.cq);
48591 + if (adapter->be3_native)
48592 + be_parse_rx_compl_v1(adapter, compl, rxcp);
48593 + else
48594 + be_parse_rx_compl_v0(adapter, compl, rxcp);
48595 +
48596 + if (rxcp->vlanf) {
48597 + /* vlanf could be wrongly set in some cards.
48598 + * ignore if vtm is not set */
48599 + if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
48600 + rxcp->vlanf = 0;
48601 +
48602 + if (!lancer_chip(adapter))
48603 + rxcp->vlan_tag = swab16(rxcp->vlan_tag);
48604 +
48605 + if ((adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK)) &&
48606 + !adapter->vlan_tag[rxcp->vlan_tag])
48607 + rxcp->vlanf = 0;
48608 + }
48609 +
48610 + /* As the compl has been parsed, reset it; we wont touch it again */
48611 + compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
48612 +
48613 + queue_tail_inc(&rxo->cq);
48614 return rxcp;
48615 }
48616
48617 -/* To reset the valid bit, we need to reset the whole word as
48618 - * when walking the queue the valid entries are little-endian
48619 - * and invalid entries are host endian
48620 - */
48621 -static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
48622 -{
48623 - rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
48624 -}
48625 -
48626 static inline struct page *be_alloc_pages(u32 size)
48627 {
48628 gfp_t alloc_flags = GFP_ATOMIC;
48629 @@ -907,11 +1504,12 @@ static inline struct page *be_alloc_pages(u32 size)
48630 * Allocate a page, split it to fragments of size rx_frag_size and post as
48631 * receive buffers to BE
48632 */
48633 -static void be_post_rx_frags(struct be_adapter *adapter)
48634 +static void be_post_rx_frags(struct be_rx_obj *rxo)
48635 {
48636 - struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
48637 - struct be_rx_page_info *page_info = NULL;
48638 - struct be_queue_info *rxq = &adapter->rx_obj.q;
48639 + struct be_adapter *adapter = rxo->adapter;
48640 + struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
48641 + struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
48642 + struct be_queue_info *rxq = &rxo->q;
48643 struct page *pagep = NULL;
48644 struct be_eth_rx_d *rxd;
48645 u64 page_dmaaddr = 0, frag_dmaaddr;
48646 @@ -922,7 +1520,7 @@ static void be_post_rx_frags(struct be_adapter *adapter)
48647 if (!pagep) {
48648 pagep = be_alloc_pages(adapter->big_page_size);
48649 if (unlikely(!pagep)) {
48650 - drvr_stats(adapter)->be_ethrx_post_fail++;
48651 + rxo->stats.rx_post_fail++;
48652 break;
48653 }
48654 page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
48655 @@ -941,7 +1539,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
48656 rxd = queue_head_node(rxq);
48657 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
48658 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
48659 - queue_head_inc(rxq);
48660
48661 /* Any space left in the current big page for another frag? */
48662 if ((page_offset + rx_frag_size + rx_frag_size) >
48663 @@ -949,17 +1546,24 @@ static void be_post_rx_frags(struct be_adapter *adapter)
48664 pagep = NULL;
48665 page_info->last_page_user = true;
48666 }
48667 +
48668 + prev_page_info = page_info;
48669 + queue_head_inc(rxq);
48670 page_info = &page_info_tbl[rxq->head];
48671 }
48672 if (pagep)
48673 - page_info->last_page_user = true;
48674 + prev_page_info->last_page_user = true;
48675
48676 + /* Ensure that posting buffers is the last thing done by this
48677 + * routine to avoid racing between rx bottom-half and
48678 + * be_worker (process) contexts.
48679 + */
48680 if (posted) {
48681 atomic_add(posted, &rxq->used);
48682 be_rxq_notify(adapter, rxq->id, posted);
48683 } else if (atomic_read(&rxq->used) == 0) {
48684 /* Let be_worker replenish when memory is available */
48685 - adapter->rx_post_starved = true;
48686 + rxo->rx_post_starved = true;
48687 }
48688
48689 return;
48690 @@ -972,6 +1576,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
48691 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
48692 return NULL;
48693
48694 + rmb();
48695 be_dws_le_to_cpu(txcp, sizeof(*txcp));
48696
48697 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
48698 @@ -980,11 +1585,14 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
48699 return txcp;
48700 }
48701
48702 -static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
48703 +static u16 be_tx_compl_process(struct be_adapter *adapter,
48704 + struct be_tx_obj *txo, u16 last_index)
48705 {
48706 - struct be_queue_info *txq = &adapter->tx_obj.q;
48707 - struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
48708 + struct be_queue_info *txq = &txo->q;
48709 + struct be_eth_wrb *wrb;
48710 + struct sk_buff **sent_skbs = txo->sent_skb_list;
48711 struct sk_buff *sent_skb;
48712 + u64 busaddr;
48713 u16 cur_index, num_wrbs = 0;
48714
48715 cur_index = txq->tail;
48716 @@ -992,15 +1600,31 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
48717 BUG_ON(!sent_skb);
48718 sent_skbs[cur_index] = NULL;
48719
48720 - do {
48721 + wrb = queue_tail_node(txq);
48722 + be_dws_le_to_cpu(wrb, sizeof(*wrb));
48723 + busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
48724 + if (busaddr != 0) {
48725 + pci_unmap_single(adapter->pdev, busaddr,
48726 + wrb->frag_len, PCI_DMA_TODEVICE);
48727 + }
48728 + num_wrbs++;
48729 + queue_tail_inc(txq);
48730 +
48731 + while (cur_index != last_index) {
48732 cur_index = txq->tail;
48733 + wrb = queue_tail_node(txq);
48734 + be_dws_le_to_cpu(wrb, sizeof(*wrb));
48735 + busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
48736 + if (busaddr != 0) {
48737 + pci_unmap_page(adapter->pdev, busaddr,
48738 + wrb->frag_len, PCI_DMA_TODEVICE);
48739 + }
48740 num_wrbs++;
48741 queue_tail_inc(txq);
48742 - } while (cur_index != last_index);
48743 + }
48744
48745 - atomic_sub(num_wrbs, &txq->used);
48746 - skb_dma_unmap(&adapter->pdev->dev, sent_skb, DMA_TO_DEVICE);
48747 kfree_skb(sent_skb);
48748 + return num_wrbs;
48749 }
48750
48751 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
48752 @@ -1010,13 +1634,15 @@ static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
48753 if (!eqe->evt)
48754 return NULL;
48755
48756 + rmb();
48757 eqe->evt = le32_to_cpu(eqe->evt);
48758 queue_tail_inc(&eq_obj->q);
48759 return eqe;
48760 }
48761
48762 static int event_handle(struct be_adapter *adapter,
48763 - struct be_eq_obj *eq_obj)
48764 + struct be_eq_obj *eq_obj,
48765 + bool rearm)
48766 {
48767 struct be_eq_entry *eqe;
48768 u16 num = 0;
48769 @@ -1029,7 +1655,10 @@ static int event_handle(struct be_adapter *adapter,
48770 /* Deal with any spurious interrupts that come
48771 * without events
48772 */
48773 - be_eq_notify(adapter, eq_obj->q.id, true, true, num);
48774 + if (!num)
48775 + rearm = true;
48776 +
48777 + be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
48778 if (num)
48779 napi_schedule(&eq_obj->napi);
48780
48781 @@ -1053,49 +1682,55 @@ static void be_eq_clean(struct be_adapter *adapter,
48782 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
48783 }
48784
48785 -static void be_rx_q_clean(struct be_adapter *adapter)
48786 +static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
48787 {
48788 struct be_rx_page_info *page_info;
48789 - struct be_queue_info *rxq = &adapter->rx_obj.q;
48790 - struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
48791 - struct be_eth_rx_compl *rxcp;
48792 + struct be_queue_info *rxq = &rxo->q;
48793 + struct be_queue_info *rx_cq = &rxo->cq;
48794 + struct be_rx_compl_info *rxcp;
48795 u16 tail;
48796
48797 /* First cleanup pending rx completions */
48798 - while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
48799 - be_rx_compl_discard(adapter, rxcp);
48800 - be_rx_compl_reset(rxcp);
48801 + while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
48802 + be_rx_compl_discard(adapter, rxo, rxcp);
48803 be_cq_notify(adapter, rx_cq->id, true, 1);
48804 }
48805
48806 /* Then free posted rx buffer that were not used */
48807 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
48808 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
48809 - page_info = get_rx_page_info(adapter, tail);
48810 + page_info = get_rx_page_info(adapter, rxo, tail);
48811 put_page(page_info->page);
48812 memset(page_info, 0, sizeof(*page_info));
48813 }
48814 BUG_ON(atomic_read(&rxq->used));
48815 + rxq->tail = rxq->head = 0;
48816 }
48817
48818 -static void be_tx_compl_clean(struct be_adapter *adapter)
48819 +static void be_tx_compl_clean(struct be_adapter *adapter,
48820 + struct be_tx_obj *txo)
48821 {
48822 - struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
48823 - struct be_queue_info *txq = &adapter->tx_obj.q;
48824 + struct be_queue_info *tx_cq = &txo->cq;
48825 + struct be_queue_info *txq = &txo->q;
48826 struct be_eth_tx_compl *txcp;
48827 - u16 end_idx, cmpl = 0, timeo = 0;
48828 + u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
48829 + struct sk_buff **sent_skbs = txo->sent_skb_list;
48830 + struct sk_buff *sent_skb;
48831 + bool dummy_wrb;
48832
48833 /* Wait for a max of 200ms for all the tx-completions to arrive. */
48834 do {
48835 while ((txcp = be_tx_compl_get(tx_cq))) {
48836 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
48837 wrb_index, txcp);
48838 - be_tx_compl_process(adapter, end_idx);
48839 + num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
48840 cmpl++;
48841 }
48842 if (cmpl) {
48843 be_cq_notify(adapter, tx_cq->id, false, cmpl);
48844 + atomic_sub(num_wrbs, &txq->used);
48845 cmpl = 0;
48846 + num_wrbs = 0;
48847 }
48848
48849 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
48850 @@ -1107,6 +1742,17 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
48851 if (atomic_read(&txq->used))
48852 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
48853 atomic_read(&txq->used));
48854 +
48855 + /* free posted tx for which compls will never arrive */
48856 + while (atomic_read(&txq->used)) {
48857 + sent_skb = sent_skbs[txq->tail];
48858 + end_idx = txq->tail;
48859 + index_adv(&end_idx,
48860 + wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
48861 + txq->len);
48862 + num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
48863 + atomic_sub(num_wrbs, &txq->used);
48864 + }
48865 }
48866
48867 static void be_mcc_queues_destroy(struct be_adapter *adapter)
48868 @@ -1145,8 +1791,9 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
48869 goto mcc_cq_destroy;
48870
48871 /* Ask BE to create MCC queue */
48872 - if (be_cmd_mccq_create(adapter, q, cq))
48873 + if (be_cmd_mccq_create(adapter, q, cq)) {
48874 goto mcc_q_free;
48875 + }
48876
48877 return 0;
48878
48879 @@ -1163,16 +1810,20 @@ err:
48880 static void be_tx_queues_destroy(struct be_adapter *adapter)
48881 {
48882 struct be_queue_info *q;
48883 + struct be_tx_obj *txo;
48884 + u8 i;
48885
48886 - q = &adapter->tx_obj.q;
48887 - if (q->created)
48888 - be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
48889 - be_queue_free(adapter, q);
48890 + for_all_tx_queues(adapter, txo, i) {
48891 + q = &txo->q;
48892 + if (q->created)
48893 + be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
48894 + be_queue_free(adapter, q);
48895
48896 - q = &adapter->tx_obj.cq;
48897 - if (q->created)
48898 - be_cmd_q_destroy(adapter, q, QTYPE_CQ);
48899 - be_queue_free(adapter, q);
48900 + q = &txo->cq;
48901 + if (q->created)
48902 + be_cmd_q_destroy(adapter, q, QTYPE_CQ);
48903 + be_queue_free(adapter, q);
48904 + }
48905
48906 /* Clear any residual events */
48907 be_eq_clean(adapter, &adapter->tx_eq);
48908 @@ -1183,168 +1834,210 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
48909 be_queue_free(adapter, q);
48910 }
48911
48912 +/* One TX event queue is shared by all TX compl qs */
48913 static int be_tx_queues_create(struct be_adapter *adapter)
48914 {
48915 struct be_queue_info *eq, *q, *cq;
48916 + struct be_tx_obj *txo;
48917 + u8 i, tc_id;
48918
48919 adapter->tx_eq.max_eqd = 0;
48920 adapter->tx_eq.min_eqd = 0;
48921 adapter->tx_eq.cur_eqd = 96;
48922 adapter->tx_eq.enable_aic = false;
48923 - /* Alloc Tx Event queue */
48924 +
48925 eq = &adapter->tx_eq.q;
48926 - if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
48927 + if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
48928 + sizeof(struct be_eq_entry)))
48929 return -1;
48930
48931 - /* Ask BE to create Tx Event queue */
48932 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
48933 - goto tx_eq_free;
48934 - /* Alloc TX eth compl queue */
48935 - cq = &adapter->tx_obj.cq;
48936 - if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
48937 + goto err;
48938 + adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
48939 +
48940 + for_all_tx_queues(adapter, txo, i) {
48941 + cq = &txo->cq;
48942 + if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
48943 sizeof(struct be_eth_tx_compl)))
48944 - goto tx_eq_destroy;
48945 + goto err;
48946
48947 - /* Ask BE to create Tx eth compl queue */
48948 - if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
48949 - goto tx_cq_free;
48950 + if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
48951 + goto err;
48952
48953 - /* Alloc TX eth queue */
48954 - q = &adapter->tx_obj.q;
48955 - if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
48956 - goto tx_cq_destroy;
48957 + q = &txo->q;
48958 + if (be_queue_alloc(adapter, q, TX_Q_LEN,
48959 + sizeof(struct be_eth_wrb)))
48960 + goto err;
48961
48962 - /* Ask BE to create Tx eth queue */
48963 - if (be_cmd_txq_create(adapter, q, cq))
48964 - goto tx_q_free;
48965 + if (be_cmd_txq_create(adapter, q, cq, &tc_id))
48966 + goto err;
48967 +
48968 + if (adapter->flags & BE_FLAGS_DCBX)
48969 + adapter->tc_txq_map[tc_id] = i;
48970 + }
48971 return 0;
48972
48973 -tx_q_free:
48974 - be_queue_free(adapter, q);
48975 -tx_cq_destroy:
48976 - be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
48977 -tx_cq_free:
48978 - be_queue_free(adapter, cq);
48979 -tx_eq_destroy:
48980 - be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
48981 -tx_eq_free:
48982 - be_queue_free(adapter, eq);
48983 +err:
48984 + be_tx_queues_destroy(adapter);
48985 return -1;
48986 }
48987
48988 static void be_rx_queues_destroy(struct be_adapter *adapter)
48989 {
48990 struct be_queue_info *q;
48991 + struct be_rx_obj *rxo;
48992 + int i;
48993
48994 - q = &adapter->rx_obj.q;
48995 - if (q->created) {
48996 - be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
48997 - be_rx_q_clean(adapter);
48998 - }
48999 - be_queue_free(adapter, q);
49000 + for_all_rx_queues(adapter, rxo, i) {
49001 + be_queue_free(adapter, &rxo->q);
49002 +
49003 + q = &rxo->cq;
49004 + if (q->created)
49005 + be_cmd_q_destroy(adapter, q, QTYPE_CQ);
49006 + be_queue_free(adapter, q);
49007
49008 - q = &adapter->rx_obj.cq;
49009 - if (q->created)
49010 - be_cmd_q_destroy(adapter, q, QTYPE_CQ);
49011 - be_queue_free(adapter, q);
49012 + q = &rxo->rx_eq.q;
49013 + if (q->created)
49014 + be_cmd_q_destroy(adapter, q, QTYPE_EQ);
49015 + be_queue_free(adapter, q);
49016
49017 - /* Clear any residual events */
49018 - be_eq_clean(adapter, &adapter->rx_eq);
49019 + kfree(rxo->page_info_tbl);
49020 + }
49021 +}
49022
49023 - q = &adapter->rx_eq.q;
49024 - if (q->created)
49025 - be_cmd_q_destroy(adapter, q, QTYPE_EQ);
49026 - be_queue_free(adapter, q);
49027 +/* Is BE in a multi-channel mode */
49028 +static inline bool be_is_mc(struct be_adapter *adapter) {
49029 + return (adapter->function_mode & FLEX10_MODE ||
49030 + adapter->function_mode & VNIC_MODE ||
49031 + adapter->function_mode & UMC_ENABLED);
49032 +}
49033 +
49034 +static u32 be_num_rxqs_want(struct be_adapter *adapter)
49035 +{
49036 + if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
49037 + adapter->num_vfs == 0 && be_physfn(adapter) &&
49038 + !be_is_mc(adapter)) {
49039 + return 1 + MAX_RSS_QS; /* one default non-RSS queue */
49040 + } else {
49041 + dev_warn(&adapter->pdev->dev,
49042 + "No support for multiple RX queues\n");
49043 + return 1;
49044 + }
49045 }
49046
49047 static int be_rx_queues_create(struct be_adapter *adapter)
49048 {
49049 struct be_queue_info *eq, *q, *cq;
49050 - int rc;
49051 + struct be_rx_obj *rxo;
49052 + int rc, i;
49053
49054 + adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
49055 + msix_enabled(adapter) ?
49056 + adapter->num_msix_vec - 1 : 1);
49057 + if (adapter->num_rx_qs != MAX_RX_QS)
49058 + dev_warn(&adapter->pdev->dev,
49059 + "Could create only %d receive queues",
49060 + adapter->num_rx_qs);
49061 +
49062 + adapter->max_rx_coal = gro ? BE_INIT_FRAGS_PER_FRAME : 1;
49063 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
49064 - adapter->rx_eq.max_eqd = BE_MAX_EQD;
49065 - adapter->rx_eq.min_eqd = 0;
49066 - adapter->rx_eq.cur_eqd = 0;
49067 - adapter->rx_eq.enable_aic = true;
49068 + for_all_rx_queues(adapter, rxo, i) {
49069 + rxo->adapter = adapter;
49070 + rxo->rx_eq.max_eqd = BE_MAX_EQD;
49071 + rxo->rx_eq.enable_aic = true;
49072
49073 - /* Alloc Rx Event queue */
49074 - eq = &adapter->rx_eq.q;
49075 - rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
49076 - sizeof(struct be_eq_entry));
49077 - if (rc)
49078 - return rc;
49079 + /* EQ */
49080 + eq = &rxo->rx_eq.q;
49081 + rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
49082 + sizeof(struct be_eq_entry));
49083 + if (rc)
49084 + goto err;
49085
49086 - /* Ask BE to create Rx Event queue */
49087 - rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
49088 - if (rc)
49089 - goto rx_eq_free;
49090 + rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
49091 + if (rc)
49092 + goto err;
49093
49094 - /* Alloc RX eth compl queue */
49095 - cq = &adapter->rx_obj.cq;
49096 - rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
49097 - sizeof(struct be_eth_rx_compl));
49098 - if (rc)
49099 - goto rx_eq_destroy;
49100 + rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
49101
49102 - /* Ask BE to create Rx eth compl queue */
49103 - rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
49104 - if (rc)
49105 - goto rx_cq_free;
49106 + /* CQ */
49107 + cq = &rxo->cq;
49108 + rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
49109 + sizeof(struct be_eth_rx_compl));
49110 + if (rc)
49111 + goto err;
49112
49113 - /* Alloc RX eth queue */
49114 - q = &adapter->rx_obj.q;
49115 - rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
49116 - if (rc)
49117 - goto rx_cq_destroy;
49118 + rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
49119 + if (rc)
49120 + goto err;
49121
49122 - /* Ask BE to create Rx eth queue */
49123 - rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
49124 - BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
49125 - if (rc)
49126 - goto rx_q_free;
49127 + /* Rx Q - will be created in be_open() */
49128 + q = &rxo->q;
49129 + rc = be_queue_alloc(adapter, q, RX_Q_LEN,
49130 + sizeof(struct be_eth_rx_d));
49131 + if (rc)
49132 + goto err;
49133 +
49134 + rxo->page_info_tbl = kzalloc(sizeof(struct be_rx_page_info) *
49135 + RX_Q_LEN, GFP_KERNEL);
49136 + if (!rxo->page_info_tbl)
49137 + goto err;
49138 + }
49139
49140 return 0;
49141 -rx_q_free:
49142 - be_queue_free(adapter, q);
49143 -rx_cq_destroy:
49144 - be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
49145 -rx_cq_free:
49146 - be_queue_free(adapter, cq);
49147 -rx_eq_destroy:
49148 - be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
49149 -rx_eq_free:
49150 - be_queue_free(adapter, eq);
49151 - return rc;
49152 +err:
49153 + be_rx_queues_destroy(adapter);
49154 + return -1;
49155 }
49156
49157 -/* There are 8 evt ids per func. Retruns the evt id's bit number */
49158 -static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
49159 +static bool event_peek(struct be_eq_obj *eq_obj)
49160 {
49161 - return eq_id - 8 * be_pci_func(adapter);
49162 + struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
49163 + if (!eqe->evt)
49164 + return false;
49165 + else
49166 + return true;
49167 }
49168
49169 static irqreturn_t be_intx(int irq, void *dev)
49170 {
49171 struct be_adapter *adapter = dev;
49172 - int isr;
49173 + struct be_rx_obj *rxo;
49174 + int isr, i, tx = 0 , rx = 0;
49175
49176 - isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
49177 - be_pci_func(adapter) * CEV_ISR_SIZE);
49178 - if (!isr)
49179 - return IRQ_NONE;
49180 + if (lancer_chip(adapter)) {
49181 + if (event_peek(&adapter->tx_eq))
49182 + tx = event_handle(adapter, &adapter->tx_eq, false);
49183 + for_all_rx_queues(adapter, rxo, i) {
49184 + if (event_peek(&rxo->rx_eq))
49185 + rx |= event_handle(adapter, &rxo->rx_eq, true);
49186 + }
49187
49188 - event_handle(adapter, &adapter->tx_eq);
49189 - event_handle(adapter, &adapter->rx_eq);
49190 + if (!(tx || rx))
49191 + return IRQ_NONE;
49192 + } else {
49193 + isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
49194 + (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
49195 + if (!isr)
49196 + return IRQ_NONE;
49197 +
49198 + if ((1 << adapter->tx_eq.eq_idx & isr))
49199 + event_handle(adapter, &adapter->tx_eq, false);
49200 +
49201 + for_all_rx_queues(adapter, rxo, i) {
49202 + if ((1 << rxo->rx_eq.eq_idx & isr))
49203 + event_handle(adapter, &rxo->rx_eq, true);
49204 + }
49205 + }
49206
49207 return IRQ_HANDLED;
49208 }
49209
49210 static irqreturn_t be_msix_rx(int irq, void *dev)
49211 {
49212 - struct be_adapter *adapter = dev;
49213 + struct be_rx_obj *rxo = dev;
49214 + struct be_adapter *adapter = rxo->adapter;
49215
49216 - event_handle(adapter, &adapter->rx_eq);
49217 + event_handle(adapter, &rxo->rx_eq, true);
49218
49219 return IRQ_HANDLED;
49220 }
49221 @@ -1353,48 +2046,72 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
49222 {
49223 struct be_adapter *adapter = dev;
49224
49225 - event_handle(adapter, &adapter->tx_eq);
49226 + event_handle(adapter, &adapter->tx_eq, false);
49227
49228 return IRQ_HANDLED;
49229 }
49230
49231 static inline bool do_gro(struct be_adapter *adapter,
49232 - struct be_eth_rx_compl *rxcp)
49233 + struct be_rx_compl_info *rxcp)
49234 {
49235 - int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
49236 - int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
49237 -
49238 - if (err)
49239 - drvr_stats(adapter)->be_rxcp_err++;
49240 -
49241 - return (tcp_frame && !err) ? true : false;
49242 + return (!rxcp->tcpf || rxcp->err || adapter->max_rx_coal <= 1 ||
49243 + (rxcp->vlanf && !vlan_configured(adapter))) ?
49244 + false : true;
49245 }
49246
49247 int be_poll_rx(struct napi_struct *napi, int budget)
49248 {
49249 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
49250 - struct be_adapter *adapter =
49251 - container_of(rx_eq, struct be_adapter, rx_eq);
49252 - struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
49253 - struct be_eth_rx_compl *rxcp;
49254 + struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
49255 + struct be_adapter *adapter = rxo->adapter;
49256 + struct be_queue_info *rx_cq = &rxo->cq;
49257 + struct be_rx_compl_info *rxcp;
49258 u32 work_done;
49259 + bool flush_lro = false;
49260
49261 + rxo->stats.rx_polls++;
49262 for (work_done = 0; work_done < budget; work_done++) {
49263 - rxcp = be_rx_compl_get(adapter);
49264 + rxcp = be_rx_compl_get(rxo);
49265 if (!rxcp)
49266 break;
49267
49268 - if (do_gro(adapter, rxcp))
49269 - be_rx_compl_process_gro(adapter, rxcp);
49270 - else
49271 - be_rx_compl_process(adapter, rxcp);
49272 + /* Is it a flush compl that has no data */
49273 + if (unlikely(rxcp->num_rcvd == 0))
49274 + continue;
49275
49276 - be_rx_compl_reset(rxcp);
49277 + if (unlikely(rxcp->port != adapter->port_num)) {
49278 + be_rx_compl_discard(adapter, rxo, rxcp);
49279 + be_rx_stats_update(rxo, rxcp);
49280 + continue;
49281 + }
49282 +
49283 + if (likely((lancer_A0_chip(adapter) && !rxcp->err) ||
49284 + !lancer_A0_chip(adapter))) {
49285 + if (do_gro(adapter, rxcp)) {
49286 + if (adapter->gro_supported) {
49287 + be_rx_compl_process_gro(adapter, rxo,
49288 + rxcp);
49289 + } else {
49290 + be_rx_compl_process_lro(adapter, rxo,
49291 + rxcp);
49292 + flush_lro = true;
49293 + }
49294 + } else {
49295 + be_rx_compl_process(adapter, rxo, rxcp);
49296 + }
49297 + } else if (lancer_A0_chip(adapter) && rxcp->err) {
49298 + be_rx_compl_discard(adapter, rxo, rxcp);
49299 + }
49300 +
49301 + be_rx_stats_update(rxo, rxcp);
49302 }
49303
49304 + if (flush_lro)
49305 + lro_flush_all(&rxo->lro_mgr);
49306 +
49307 /* Refill the queue */
49308 - if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
49309 - be_post_rx_frags(adapter);
49310 + if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
49311 + be_post_rx_frags(rxo);
49312
49313 /* All consumed */
49314 if (work_done < budget) {
49315 @@ -1404,40 +2121,13 @@ int be_poll_rx(struct napi_struct *napi, int budget)
49316 /* More to be consumed; continue with interrupts disabled */
49317 be_cq_notify(adapter, rx_cq->id, false, work_done);
49318 }
49319 +
49320 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
49321 + adapter->netdev->last_rx = jiffies;
49322 +#endif
49323 return work_done;
49324 }
49325
49326 -void be_process_tx(struct be_adapter *adapter)
49327 -{
49328 - struct be_queue_info *txq = &adapter->tx_obj.q;
49329 - struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
49330 - struct be_eth_tx_compl *txcp;
49331 - u32 num_cmpl = 0;
49332 - u16 end_idx;
49333 -
49334 - while ((txcp = be_tx_compl_get(tx_cq))) {
49335 - end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
49336 - wrb_index, txcp);
49337 - be_tx_compl_process(adapter, end_idx);
49338 - num_cmpl++;
49339 - }
49340 -
49341 - if (num_cmpl) {
49342 - be_cq_notify(adapter, tx_cq->id, true, num_cmpl);
49343 -
49344 - /* As Tx wrbs have been freed up, wake up netdev queue if
49345 - * it was stopped due to lack of tx wrbs.
49346 - */
49347 - if (netif_queue_stopped(adapter->netdev) &&
49348 - atomic_read(&txq->used) < txq->len / 2) {
49349 - netif_wake_queue(adapter->netdev);
49350 - }
49351 -
49352 - drvr_stats(adapter)->be_tx_events++;
49353 - drvr_stats(adapter)->be_tx_compl += num_cmpl;
49354 - }
49355 -}
49356 -
49357 /* As TX and MCC share the same EQ check for both TX and MCC completions.
49358 * For TX/MCC we don't honour budget; consume everything
49359 */
49360 @@ -1446,96 +2136,264 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
49361 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
49362 struct be_adapter *adapter =
49363 container_of(tx_eq, struct be_adapter, tx_eq);
49364 + struct be_tx_obj *txo;
49365 + struct be_eth_tx_compl *txcp;
49366 + int tx_compl, mcc_compl, status = 0;
49367 + u8 i;
49368 + u16 num_wrbs;
49369 +
49370 + for_all_tx_queues(adapter, txo, i) {
49371 + tx_compl = 0;
49372 + num_wrbs = 0;
49373 + while ((txcp = be_tx_compl_get(&txo->cq))) {
49374 + num_wrbs += be_tx_compl_process(adapter, txo,
49375 + AMAP_GET_BITS(struct amap_eth_tx_compl,
49376 + wrb_index, txcp));
49377 + tx_compl++;
49378 + }
49379 + if (tx_compl) {
49380 + be_cq_notify(adapter, txo->cq.id, true, tx_compl);
49381 +
49382 + atomic_sub(num_wrbs, &txo->q.used);
49383 +
49384 + /* As Tx wrbs have been freed up, wake up netdev queue
49385 + * if it was stopped due to lack of tx wrbs. */
49386 + if (__netif_subqueue_stopped(adapter->netdev, i) &&
49387 + atomic_read(&txo->q.used) < txo->q.len / 2) {
49388 + netif_wake_subqueue(adapter->netdev, i);
49389 + }
49390 +
49391 + adapter->drv_stats.be_tx_events++;
49392 + txo->stats.be_tx_compl += tx_compl;
49393 + }
49394 + }
49395 +
49396 + mcc_compl = be_process_mcc(adapter, &status);
49397 +
49398 + if (mcc_compl) {
49399 + struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
49400 + be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
49401 + }
49402
49403 napi_complete(napi);
49404
49405 - be_process_tx(adapter);
49406 -
49407 - be_process_mcc(adapter);
49408 -
49409 + be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
49410 return 1;
49411 }
49412
49413 +void be_detect_dump_ue(struct be_adapter *adapter)
49414 +{
49415 + u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
49416 + u32 i;
49417 +
49418 + pci_read_config_dword(adapter->pdev,
49419 + PCICFG_UE_STATUS_LOW, &ue_status_lo);
49420 + pci_read_config_dword(adapter->pdev,
49421 + PCICFG_UE_STATUS_HIGH, &ue_status_hi);
49422 + pci_read_config_dword(adapter->pdev,
49423 + PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
49424 + pci_read_config_dword(adapter->pdev,
49425 + PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
49426 +
49427 + ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
49428 + ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
49429 +
49430 + if (ue_status_lo || ue_status_hi) {
49431 + adapter->ue_detected = true;
49432 + adapter->eeh_err = true;
49433 + dev_err(&adapter->pdev->dev, "UE Detected!!\n");
49434 + }
49435 +
49436 + if (ue_status_lo) {
49437 + for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
49438 + if (ue_status_lo & 1)
49439 + dev_err(&adapter->pdev->dev,
49440 + "UE: %s bit set\n", ue_status_low_desc[i]);
49441 + }
49442 + }
49443 + if (ue_status_hi) {
49444 + for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
49445 + if (ue_status_hi & 1)
49446 + dev_err(&adapter->pdev->dev,
49447 + "UE: %s bit set\n", ue_status_hi_desc[i]);
49448 + }
49449 + }
49450 +
49451 +}
49452 +
49453 static void be_worker(struct work_struct *work)
49454 {
49455 struct be_adapter *adapter =
49456 container_of(work, struct be_adapter, work.work);
49457 + struct be_rx_obj *rxo;
49458 + struct be_tx_obj *txo;
49459 + int i;
49460
49461 - be_cmd_get_stats(adapter, &adapter->stats.cmd);
49462 + if (!adapter->ue_detected && !lancer_chip(adapter))
49463 + be_detect_dump_ue(adapter);
49464
49465 - /* Set EQ delay */
49466 - be_rx_eqd_update(adapter);
49467 + /* when interrupts are not yet enabled, just reap any pending
49468 + * mcc completions */
49469 + if (!netif_running(adapter->netdev)) {
49470 + int mcc_compl, status = 0;
49471
49472 - be_tx_rate_update(adapter);
49473 - be_rx_rate_update(adapter);
49474 + mcc_compl = be_process_mcc(adapter, &status);
49475
49476 - if (adapter->rx_post_starved) {
49477 - adapter->rx_post_starved = false;
49478 - be_post_rx_frags(adapter);
49479 + if (mcc_compl) {
49480 + struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
49481 + be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
49482 + }
49483 +
49484 + goto reschedule;
49485 + }
49486 +
49487 + if (!adapter->stats_cmd_sent)
49488 + be_cmd_get_stats(adapter, &adapter->stats_cmd);
49489 +
49490 + for_all_tx_queues(adapter, txo, i)
49491 + be_tx_rate_update(txo);
49492 +
49493 + for_all_rx_queues(adapter, rxo, i) {
49494 + be_rx_rate_update(rxo);
49495 + be_rx_eqd_update(adapter, rxo);
49496 +
49497 + if (rxo->rx_post_starved) {
49498 + rxo->rx_post_starved = false;
49499 + be_post_rx_frags(rxo);
49500 + }
49501 }
49502
49503 +reschedule:
49504 + adapter->work_counter++;
49505 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
49506 }
49507
49508 +static void be_msix_disable(struct be_adapter *adapter)
49509 +{
49510 + if (msix_enabled(adapter)) {
49511 + pci_disable_msix(adapter->pdev);
49512 + adapter->num_msix_vec = 0;
49513 + }
49514 +}
49515 +
49516 static void be_msix_enable(struct be_adapter *adapter)
49517 {
49518 - int i, status;
49519 +#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
49520 + int i, status, num_vec;
49521
49522 - for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
49523 + num_vec = be_num_rxqs_want(adapter) + 1;
49524 +
49525 + for (i = 0; i < num_vec; i++)
49526 adapter->msix_entries[i].entry = i;
49527
49528 - status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
49529 - BE_NUM_MSIX_VECTORS);
49530 - if (status == 0)
49531 - adapter->msix_enabled = true;
49532 + status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
49533 + if (status == 0) {
49534 + goto done;
49535 + } else if (status >= BE_MIN_MSIX_VECTORS) {
49536 + num_vec = status;
49537 + if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
49538 + num_vec) == 0)
49539 + goto done;
49540 + }
49541 return;
49542 +done:
49543 + adapter->num_msix_vec = num_vec;
49544 + return;
49545 +}
49546 +
49547 +static void be_sriov_enable(struct be_adapter *adapter)
49548 +{
49549 + be_check_sriov_fn_type(adapter);
49550 +#ifdef CONFIG_PCI_IOV
49551 + if (be_physfn(adapter) && num_vfs) {
49552 + int status, pos;
49553 + u16 nvfs;
49554 +
49555 + pos = pci_find_ext_capability(adapter->pdev,
49556 + PCI_EXT_CAP_ID_SRIOV);
49557 + pci_read_config_word(adapter->pdev,
49558 + pos + PCI_SRIOV_TOTAL_VF, &nvfs);
49559 + adapter->num_vfs = num_vfs;
49560 + if (num_vfs > nvfs) {
49561 + dev_info(&adapter->pdev->dev,
49562 + "Device supports %d VFs and not %d\n",
49563 + nvfs, num_vfs);
49564 + adapter->num_vfs = nvfs;
49565 + }
49566 +
49567 + status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
49568 + if (status)
49569 + adapter->num_vfs = 0;
49570 + }
49571 +#endif
49572 +}
49573 +
49574 +static void be_sriov_disable(struct be_adapter *adapter)
49575 +{
49576 +#ifdef CONFIG_PCI_IOV
49577 + if (adapter->num_vfs > 0) {
49578 + pci_disable_sriov(adapter->pdev);
49579 + adapter->num_vfs = 0;
49580 + }
49581 +#endif
49582 }
49583
49584 -static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
49585 +static inline int be_msix_vec_get(struct be_adapter *adapter,
49586 + struct be_eq_obj *eq_obj)
49587 {
49588 - return adapter->msix_entries[
49589 - be_evt_bit_get(adapter, eq_id)].vector;
49590 + return adapter->msix_entries[eq_obj->eq_idx].vector;
49591 }
49592
49593 static int be_request_irq(struct be_adapter *adapter,
49594 struct be_eq_obj *eq_obj,
49595 - void *handler, char *desc)
49596 + void *handler, char *desc, void *context)
49597 {
49598 struct net_device *netdev = adapter->netdev;
49599 int vec;
49600
49601 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
49602 - vec = be_msix_vec_get(adapter, eq_obj->q.id);
49603 - return request_irq(vec, handler, 0, eq_obj->desc, adapter);
49604 + vec = be_msix_vec_get(adapter, eq_obj);
49605 + return request_irq(vec, handler, 0, eq_obj->desc, context);
49606 }
49607
49608 -static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
49609 +static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
49610 + void *context)
49611 {
49612 - int vec = be_msix_vec_get(adapter, eq_obj->q.id);
49613 - free_irq(vec, adapter);
49614 + int vec = be_msix_vec_get(adapter, eq_obj);
49615 + free_irq(vec, context);
49616 }
49617
49618 static int be_msix_register(struct be_adapter *adapter)
49619 {
49620 - int status;
49621 + struct be_rx_obj *rxo;
49622 + int status, i;
49623 + char qname[10];
49624
49625 - status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
49626 + status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
49627 + adapter);
49628 if (status)
49629 goto err;
49630
49631 - status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
49632 - if (status)
49633 - goto free_tx_irq;
49634 + for_all_rx_queues(adapter, rxo, i) {
49635 + sprintf(qname, "rxq%d", i);
49636 + status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
49637 + qname, rxo);
49638 + if (status)
49639 + goto err_msix;
49640 + }
49641
49642 return 0;
49643
49644 -free_tx_irq:
49645 - be_free_irq(adapter, &adapter->tx_eq);
49646 +err_msix:
49647 + be_free_irq(adapter, &adapter->tx_eq, adapter);
49648 +
49649 + for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
49650 + be_free_irq(adapter, &rxo->rx_eq, rxo);
49651 +
49652 err:
49653 dev_warn(&adapter->pdev->dev,
49654 "MSIX Request IRQ failed - err %d\n", status);
49655 - pci_disable_msix(adapter->pdev);
49656 - adapter->msix_enabled = false;
49657 + be_msix_disable(adapter);
49658 return status;
49659 }
49660
49661 @@ -1544,10 +2402,13 @@ static int be_irq_register(struct be_adapter *adapter)
49662 struct net_device *netdev = adapter->netdev;
49663 int status;
49664
49665 - if (adapter->msix_enabled) {
49666 + if (msix_enabled(adapter)) {
49667 status = be_msix_register(adapter);
49668 if (status == 0)
49669 goto done;
49670 + /* INTx is not supported for VF */
49671 + if (!be_physfn(adapter))
49672 + return status;
49673 }
49674
49675 /* INTx */
49676 @@ -1567,87 +2428,363 @@ done:
49677 static void be_irq_unregister(struct be_adapter *adapter)
49678 {
49679 struct net_device *netdev = adapter->netdev;
49680 + struct be_rx_obj *rxo;
49681 + int i;
49682
49683 if (!adapter->isr_registered)
49684 return;
49685
49686 /* INTx */
49687 - if (!adapter->msix_enabled) {
49688 + if (!msix_enabled(adapter)) {
49689 free_irq(netdev->irq, adapter);
49690 goto done;
49691 }
49692
49693 /* MSIx */
49694 - be_free_irq(adapter, &adapter->tx_eq);
49695 - be_free_irq(adapter, &adapter->rx_eq);
49696 + be_free_irq(adapter, &adapter->tx_eq, adapter);
49697 +
49698 + for_all_rx_queues(adapter, rxo, i)
49699 + be_free_irq(adapter, &rxo->rx_eq, rxo);
49700 +
49701 done:
49702 adapter->isr_registered = false;
49703 - return;
49704 }
49705
49706 -static int be_open(struct net_device *netdev)
49707 +static u16 be_select_queue(struct net_device *netdev,
49708 + struct sk_buff *skb)
49709 {
49710 struct be_adapter *adapter = netdev_priv(netdev);
49711 - struct be_eq_obj *rx_eq = &adapter->rx_eq;
49712 + u8 prio;
49713 +
49714 + if (adapter->num_tx_qs == 1)
49715 + return 0;
49716 +
49717 + prio = (vlan_tx_tag_get(skb) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
49718 + return adapter->tc_txq_map[adapter->prio_tc_map[prio]];
49719 +}
49720 +
49721 +static void be_rx_queues_clear(struct be_adapter *adapter)
49722 +{
49723 + struct be_queue_info *q;
49724 + struct be_rx_obj *rxo;
49725 + int i;
49726 +
49727 + for_all_rx_queues(adapter, rxo, i) {
49728 + q = &rxo->q;
49729 + if (q->created) {
49730 + be_cmd_rxq_destroy(adapter, q);
49731 + /* After the rxq is invalidated, wait for a grace time
49732 + * of 1ms for all dma to end and the flush compl to
49733 + * arrive
49734 + */
49735 + mdelay(1);
49736 + be_rx_q_clean(adapter, rxo);
49737 + }
49738 +
49739 + /* Clear any residual events */
49740 + q = &rxo->rx_eq.q;
49741 + if (q->created)
49742 + be_eq_clean(adapter, &rxo->rx_eq);
49743 + }
49744 +}
49745 +
49746 +static int be_close(struct net_device *netdev)
49747 +{
49748 + struct be_adapter *adapter = netdev_priv(netdev);
49749 + struct be_rx_obj *rxo;
49750 + struct be_tx_obj *txo;
49751 struct be_eq_obj *tx_eq = &adapter->tx_eq;
49752 - bool link_up;
49753 - int status;
49754 + int vec, i;
49755 +
49756 + be_async_mcc_disable(adapter);
49757 +
49758 + netif_stop_queue(netdev);
49759 + netif_carrier_off(netdev);
49760 + adapter->link_status = LINK_DOWN;
49761 +
49762 + if (!lancer_chip(adapter))
49763 + be_intr_set(adapter, false);
49764 +
49765 + for_all_rx_queues(adapter, rxo, i)
49766 + napi_disable(&rxo->rx_eq.napi);
49767 +
49768 + napi_disable(&tx_eq->napi);
49769 +
49770 + if (lancer_chip(adapter)) {
49771 + be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
49772 + for_all_rx_queues(adapter, rxo, i)
49773 + be_cq_notify(adapter, rxo->cq.id, false, 0);
49774 + for_all_tx_queues(adapter, txo, i)
49775 + be_cq_notify(adapter, txo->cq.id, false, 0);
49776 + }
49777 +
49778 + if (msix_enabled(adapter)) {
49779 + vec = be_msix_vec_get(adapter, tx_eq);
49780 + synchronize_irq(vec);
49781 +
49782 + for_all_rx_queues(adapter, rxo, i) {
49783 + vec = be_msix_vec_get(adapter, &rxo->rx_eq);
49784 + synchronize_irq(vec);
49785 + }
49786 + } else {
49787 + synchronize_irq(netdev->irq);
49788 + }
49789 + be_irq_unregister(adapter);
49790 +
49791 + /* Wait for all pending tx completions to arrive so that
49792 + * all tx skbs are freed.
49793 + */
49794 + for_all_tx_queues(adapter, txo, i)
49795 + be_tx_compl_clean(adapter, txo);
49796 +
49797 + be_rx_queues_clear(adapter);
49798 + return 0;
49799 +}
49800 +
49801 +static int be_rx_queues_setup(struct be_adapter *adapter)
49802 +{
49803 + struct be_rx_obj *rxo;
49804 + int rc, i;
49805 + u8 rsstable[MAX_RSS_QS];
49806 +
49807 + for_all_rx_queues(adapter, rxo, i) {
49808 + rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
49809 + rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
49810 + adapter->if_handle,
49811 + (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
49812 + if (rc)
49813 + return rc;
49814 + }
49815 +
49816 + if (be_multi_rxq(adapter)) {
49817 + for_all_rss_queues(adapter, rxo, i)
49818 + rsstable[i] = rxo->rss_id;
49819 +
49820 + rc = be_cmd_rss_config(adapter, rsstable,
49821 + adapter->num_rx_qs - 1);
49822 + if (rc)
49823 + return rc;
49824 + }
49825
49826 /* First time posting */
49827 - be_post_rx_frags(adapter);
49828 + for_all_rx_queues(adapter, rxo, i) {
49829 + be_post_rx_frags(rxo);
49830 + napi_enable(&rxo->rx_eq.napi);
49831 + }
49832 + return 0;
49833 +}
49834 +
49835 +static int be_open(struct net_device *netdev)
49836 +{
49837 + struct be_adapter *adapter = netdev_priv(netdev);
49838 + struct be_eq_obj *tx_eq = &adapter->tx_eq;
49839 + struct be_rx_obj *rxo;
49840 + int link_status;
49841 + int status, i;
49842 + u8 mac_speed;
49843 + u16 link_speed;
49844 +
49845 + status = be_rx_queues_setup(adapter);
49846 + if (status)
49847 + goto err;
49848
49849 - napi_enable(&rx_eq->napi);
49850 napi_enable(&tx_eq->napi);
49851
49852 be_irq_register(adapter);
49853
49854 - be_intr_set(adapter, true);
49855 + if (!lancer_chip(adapter))
49856 + be_intr_set(adapter, true);
49857
49858 /* The evt queues are created in unarmed state; arm them */
49859 - be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
49860 + for_all_rx_queues(adapter, rxo, i) {
49861 + be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
49862 + be_cq_notify(adapter, rxo->cq.id, true, 0);
49863 + }
49864 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
49865
49866 - /* Rx compl queue may be in unarmed state; rearm it */
49867 - be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
49868 + /* Now that interrupts are on we can process async mcc */
49869 + be_async_mcc_enable(adapter);
49870
49871 - status = be_cmd_link_status_query(adapter, &link_up);
49872 + status = be_cmd_link_status_query(adapter, &link_status, &mac_speed,
49873 + &link_speed, 0);
49874 if (status)
49875 - goto ret_sts;
49876 - be_link_status_update(adapter, link_up);
49877 + goto err;
49878 + be_link_status_update(adapter, link_status);
49879
49880 - status = be_vid_config(adapter);
49881 + status = be_vid_config(adapter, false, 0);
49882 if (status)
49883 - goto ret_sts;
49884 + goto err;
49885
49886 - status = be_cmd_set_flow_control(adapter,
49887 - adapter->tx_fc, adapter->rx_fc);
49888 - if (status)
49889 - goto ret_sts;
49890 + if (be_physfn(adapter)) {
49891 + status = be_cmd_set_flow_control(adapter,
49892 + adapter->tx_fc, adapter->rx_fc);
49893 + if (status)
49894 + goto err;
49895 + }
49896 +
49897 + return 0;
49898 +err:
49899 + be_close(adapter->netdev);
49900 + return -EIO;
49901 +}
49902 +
49903 +static int be_setup_wol(struct be_adapter *adapter, bool enable)
49904 +{
49905 + struct be_dma_mem cmd;
49906 + int status = 0;
49907 + u8 mac[ETH_ALEN];
49908 +
49909 + memset(mac, 0, ETH_ALEN);
49910 +
49911 + cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
49912 + cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
49913 + if (cmd.va == NULL)
49914 + return -1;
49915 + memset(cmd.va, 0, cmd.size);
49916 +
49917 + if (enable) {
49918 + status = pci_write_config_dword(adapter->pdev,
49919 + PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
49920 + if (status) {
49921 + dev_err(&adapter->pdev->dev,
49922 + "Could not enable Wake-on-lan\n");
49923 + pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
49924 + cmd.dma);
49925 + return status;
49926 + }
49927 + status = be_cmd_enable_magic_wol(adapter,
49928 + adapter->netdev->dev_addr, &cmd);
49929 + pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
49930 + pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
49931 + } else {
49932 + status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
49933 + pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
49934 + pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
49935 + }
49936 +
49937 + pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
49938 + return status;
49939 +}
49940 +
49941 +/*
49942 + * Generate a seed MAC address from the PF MAC Address using jhash.
49943 + * MAC Address for VFs are assigned incrementally starting from the seed.
49944 + * These addresses are programmed in the ASIC by the PF and the VF driver
49945 + * queries for the MAC address during its probe.
49946 + */
49947 +static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
49948 +{
49949 + u32 vf = 0;
49950 + int status = 0;
49951 + u8 mac[ETH_ALEN];
49952 +
49953 + be_vf_eth_addr_generate(adapter, mac);
49954 +
49955 + for (vf = 0; vf < adapter->num_vfs; vf++) {
49956 + status = be_cmd_pmac_add(adapter, mac,
49957 + adapter->vf_cfg[vf].vf_if_handle,
49958 + &adapter->vf_cfg[vf].vf_pmac_id,
49959 + vf + 1);
49960 + if (status)
49961 + dev_err(&adapter->pdev->dev,
49962 + "Mac address add failed for VF %d\n", vf);
49963 + else
49964 + memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
49965
49966 - schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
49967 -ret_sts:
49968 + mac[5] += 1;
49969 + }
49970 return status;
49971 }
49972
49973 +static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
49974 +{
49975 + u32 vf;
49976 +
49977 + for (vf = 0; vf < adapter->num_vfs; vf++) {
49978 + if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
49979 + be_cmd_pmac_del(adapter,
49980 + adapter->vf_cfg[vf].vf_if_handle,
49981 + adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
49982 + }
49983 +}
49984 +
49985 +static int be_num_txqs_want(struct be_adapter *adapter)
49986 +{
49987 + if (adapter->num_vfs > 0 || be_is_mc(adapter) ||
49988 + lancer_chip(adapter) || !be_physfn(adapter) ||
49989 + adapter->generation == BE_GEN2)
49990 + return 1;
49991 + else
49992 + return MAX_TX_QS;
49993 +}
49994 +
49995 static int be_setup(struct be_adapter *adapter)
49996 {
49997 struct net_device *netdev = adapter->netdev;
49998 - u32 cap_flags, en_flags;
49999 - int status;
50000 -
50001 - cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
50002 - BE_IF_FLAGS_MCAST_PROMISCUOUS |
50003 - BE_IF_FLAGS_PROMISCUOUS |
50004 - BE_IF_FLAGS_PASS_L3L4_ERRORS;
50005 - en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
50006 - BE_IF_FLAGS_PASS_L3L4_ERRORS;
50007 + int status, fw_num_txqs, num_txqs;
50008 + u32 cap_flags, en_flags, vf = 0;
50009 + u8 mac[ETH_ALEN];
50010 +
50011 + num_txqs = be_num_txqs_want(adapter);
50012 + if (num_txqs > 1) {
50013 + be_cmd_req_pg_pfc(adapter, &fw_num_txqs);
50014 + num_txqs = min(num_txqs, fw_num_txqs);
50015 + }
50016 + adapter->num_tx_qs = num_txqs;
50017 + if (adapter->num_tx_qs != MAX_TX_QS)
50018 + netif_set_real_num_tx_queues(adapter->netdev,
50019 + adapter->num_tx_qs);
50020 +
50021 + be_cmd_req_native_mode(adapter);
50022 +
50023 + cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
50024 + BE_IF_FLAGS_BROADCAST |
50025 + BE_IF_FLAGS_MULTICAST;
50026 +
50027 + if (be_physfn(adapter)) {
50028 + if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
50029 + cap_flags |= BE_IF_FLAGS_RSS;
50030 + en_flags |= BE_IF_FLAGS_RSS;
50031 + }
50032 + cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
50033 + BE_IF_FLAGS_PROMISCUOUS;
50034 + if (!lancer_A0_chip(adapter)) {
50035 + cap_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
50036 + en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
50037 + }
50038 + }
50039
50040 status = be_cmd_if_create(adapter, cap_flags, en_flags,
50041 netdev->dev_addr, false/* pmac_invalid */,
50042 - &adapter->if_handle, &adapter->pmac_id);
50043 + &adapter->if_handle, &adapter->pmac_id, 0);
50044 if (status != 0)
50045 goto do_none;
50046
50047 + if (be_physfn(adapter)) {
50048 + while (vf < adapter->num_vfs) {
50049 + cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
50050 + BE_IF_FLAGS_BROADCAST;
50051 + status = be_cmd_if_create(adapter, cap_flags,
50052 + en_flags, mac, true,
50053 + &adapter->vf_cfg[vf].vf_if_handle,
50054 + NULL, vf+1);
50055 + if (status) {
50056 + dev_err(&adapter->pdev->dev,
50057 + "Interface Create failed for VF %d\n", vf);
50058 + goto if_destroy;
50059 + }
50060 + adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
50061 + vf++;
50062 + }
50063 + } else {
50064 + status = be_cmd_mac_addr_query(adapter, mac,
50065 + MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
50066 + if (!status) {
50067 + memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
50068 + memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
50069 + }
50070 + }
50071 +
50072 status = be_tx_queues_create(adapter);
50073 if (status != 0)
50074 goto if_destroy;
50075 @@ -1656,10 +2793,15 @@ static int be_setup(struct be_adapter *adapter)
50076 if (status != 0)
50077 goto tx_qs_destroy;
50078
50079 + /* Allow all priorities by default. A GRP5 evt may modify this */
50080 + adapter->vlan_prio_bmap = 0xff;
50081 +
50082 status = be_mcc_queues_create(adapter);
50083 if (status != 0)
50084 goto rx_qs_destroy;
50085
50086 + adapter->link_speed = -1;
50087 +
50088 return 0;
50089
50090 rx_qs_destroy:
50091 @@ -1667,158 +2809,392 @@ rx_qs_destroy:
50092 tx_qs_destroy:
50093 be_tx_queues_destroy(adapter);
50094 if_destroy:
50095 - be_cmd_if_destroy(adapter, adapter->if_handle);
50096 + if (be_physfn(adapter)) {
50097 + for (vf = 0; vf < adapter->num_vfs; vf++)
50098 + if (adapter->vf_cfg[vf].vf_if_handle)
50099 + be_cmd_if_destroy(adapter,
50100 + adapter->vf_cfg[vf].vf_if_handle,
50101 + vf + 1);
50102 + }
50103 + be_cmd_if_destroy(adapter, adapter->if_handle, 0);
50104 do_none:
50105 return status;
50106 }
50107
50108 static int be_clear(struct be_adapter *adapter)
50109 {
50110 + int vf;
50111 +
50112 + if (be_physfn(adapter) && adapter->num_vfs)
50113 + be_vf_eth_addr_rem(adapter);
50114 +
50115 be_mcc_queues_destroy(adapter);
50116 be_rx_queues_destroy(adapter);
50117 be_tx_queues_destroy(adapter);
50118 + adapter->eq_next_idx = 0;
50119
50120 - be_cmd_if_destroy(adapter, adapter->if_handle);
50121 + if (be_physfn(adapter)) {
50122 + for (vf = 0; vf < adapter->num_vfs; vf++)
50123 + if (adapter->vf_cfg[vf].vf_if_handle)
50124 + be_cmd_if_destroy(adapter,
50125 + adapter->vf_cfg[vf].vf_if_handle, vf + 1);
50126 + }
50127 + be_cmd_if_destroy(adapter, adapter->if_handle, 0);
50128
50129 + /* tell fw we're done with firing cmds */
50130 + be_cmd_fw_clean(adapter);
50131 return 0;
50132 }
50133
50134 -static int be_close(struct net_device *netdev)
50135 +static void be_cpy_drv_ver(struct be_adapter *adapter, void *va)
50136 +{
50137 + struct mgmt_controller_attrib *attrib =
50138 + (struct mgmt_controller_attrib *) ((u8*) va +
50139 + sizeof(struct be_cmd_resp_hdr));
50140 +
50141 + memcpy(attrib->hba_attribs.driver_version_string,
50142 + DRV_VER, sizeof(DRV_VER));
50143 + attrib->pci_bus_number = adapter->pdev->bus->number;
50144 + attrib->pci_device_number = PCI_SLOT(adapter->pdev->devfn);
50145 + return;
50146 +}
50147 +
50148 +#define IOCTL_COOKIE "SERVERENGINES CORP"
50149 +static int be_do_ioctl(struct net_device *netdev,
50150 + struct ifreq *ifr, int cmd)
50151 {
50152 struct be_adapter *adapter = netdev_priv(netdev);
50153 - struct be_eq_obj *rx_eq = &adapter->rx_eq;
50154 - struct be_eq_obj *tx_eq = &adapter->tx_eq;
50155 - int vec;
50156 + struct be_cmd_req_hdr req;
50157 + struct be_cmd_resp_hdr *resp;
50158 + void *data = ifr->ifr_data;
50159 + void *ioctl_ptr;
50160 + void *va;
50161 + dma_addr_t dma;
50162 + u32 req_size;
50163 + int status, ret = 0;
50164 + u8 cookie[32];
50165 +
50166 + switch (cmd) {
50167 + case SIOCDEVPRIVATE:
50168 + if (copy_from_user(cookie, data, strlen(IOCTL_COOKIE)))
50169 + return -EFAULT;
50170 +
50171 + if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
50172 + return -EINVAL;
50173
50174 - cancel_delayed_work_sync(&adapter->work);
50175 + ioctl_ptr = (u8 *)data + strlen(IOCTL_COOKIE);
50176 + if (copy_from_user(&req, ioctl_ptr,
50177 + sizeof(struct be_cmd_req_hdr)))
50178 + return -EFAULT;
50179
50180 - netif_stop_queue(netdev);
50181 - netif_carrier_off(netdev);
50182 - adapter->link_up = false;
50183 + req_size = le32_to_cpu(req.request_length);
50184 + if (req_size > 65536)
50185 + return -EINVAL;
50186
50187 - be_intr_set(adapter, false);
50188 + req_size += sizeof(struct be_cmd_req_hdr);
50189 + va = pci_alloc_consistent(adapter->pdev, req_size, &dma);
50190 + if (!va)
50191 + return -ENOMEM;
50192 + if (copy_from_user(va, ioctl_ptr, req_size)) {
50193 + ret = -EFAULT;
50194 + break;
50195 + }
50196
50197 - if (adapter->msix_enabled) {
50198 - vec = be_msix_vec_get(adapter, tx_eq->q.id);
50199 - synchronize_irq(vec);
50200 - vec = be_msix_vec_get(adapter, rx_eq->q.id);
50201 - synchronize_irq(vec);
50202 - } else {
50203 - synchronize_irq(netdev->irq);
50204 + status = be_cmd_pass_ext_ioctl(adapter, dma, req_size, va);
50205 + if (status == -1) {
50206 + ret = -EIO;
50207 + break;
50208 + }
50209 +
50210 + resp = (struct be_cmd_resp_hdr *) va;
50211 + if (!status) {
50212 + if (req.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES)
50213 + be_cpy_drv_ver(adapter, va);
50214 + }
50215 +
50216 + if (copy_to_user(ioctl_ptr, va, req_size)) {
50217 + ret = -EFAULT;
50218 + break;
50219 + }
50220 + break;
50221 + default:
50222 + return -EOPNOTSUPP;
50223 }
50224 - be_irq_unregister(adapter);
50225
50226 - napi_disable(&rx_eq->napi);
50227 - napi_disable(&tx_eq->napi);
50228 + if (va)
50229 + pci_free_consistent(adapter->pdev, req_size, va, dma);
50230 +
50231 + return ret;
50232 +}
50233 +
50234 +#ifdef CONFIG_NET_POLL_CONTROLLER
50235 +static void be_netpoll(struct net_device *netdev)
50236 +{
50237 + struct be_adapter *adapter = netdev_priv(netdev);
50238 + struct be_rx_obj *rxo;
50239 + int i;
50240
50241 - /* Wait for all pending tx completions to arrive so that
50242 - * all tx skbs are freed.
50243 - */
50244 - be_tx_compl_clean(adapter);
50245 + event_handle(adapter, &adapter->tx_eq, false);
50246 + for_all_rx_queues(adapter, rxo, i)
50247 + event_handle(adapter, &rxo->rx_eq, true);
50248 +
50249 + return;
50250 +}
50251 +#endif
50252 +
50253 +static int be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
50254 + void **ip_hdr, void **tcpudp_hdr,
50255 + u64 *hdr_flags, void *priv)
50256 +{
50257 + struct ethhdr *eh;
50258 + struct vlan_ethhdr *veh;
50259 + struct iphdr *iph;
50260 + u8 *va = page_address(frag->page) + frag->page_offset;
50261 + unsigned long ll_hlen;
50262 +
50263 + prefetch(va);
50264 + eh = (struct ethhdr *)va;
50265 + *mac_hdr = eh;
50266 + ll_hlen = ETH_HLEN;
50267 + if (eh->h_proto != htons(ETH_P_IP)) {
50268 + if (eh->h_proto == htons(ETH_P_8021Q)) {
50269 + veh = (struct vlan_ethhdr *)va;
50270 + if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
50271 + return -1;
50272 +
50273 + ll_hlen += VLAN_HLEN;
50274 + } else {
50275 + return -1;
50276 + }
50277 + }
50278 + *hdr_flags = LRO_IPV4;
50279 + iph = (struct iphdr *)(va + ll_hlen);
50280 + *ip_hdr = iph;
50281 + if (iph->protocol != IPPROTO_TCP)
50282 + return -1;
50283 + *hdr_flags |= LRO_TCP;
50284 + *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
50285
50286 return 0;
50287 }
50288
50289 -#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
50290 +static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev)
50291 +{
50292 + struct net_lro_mgr *lro_mgr;
50293 + struct be_rx_obj *rxo;
50294 + int i;
50295 +
50296 + for_all_rx_queues(adapter, rxo, i) {
50297 + lro_mgr = &rxo->lro_mgr;
50298 + lro_mgr->dev = netdev;
50299 + lro_mgr->features = LRO_F_NAPI;
50300 + lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
50301 + lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
50302 + lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
50303 + lro_mgr->lro_arr = rxo->lro_desc;
50304 + lro_mgr->get_frag_header = be_get_frag_header;
50305 + lro_mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME;
50306 + }
50307 +
50308 +#ifdef NETIF_F_GRO
50309 + netdev->features |= NETIF_F_GRO;
50310 + adapter->gro_supported = true;
50311 +#endif
50312 +}
50313 +
50314 +#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
50315 char flash_cookie[2][16] = {"*** SE FLAS",
50316 "H DIRECTORY *** "};
50317 -static int be_flash_image(struct be_adapter *adapter,
50318 +
50319 +static bool be_flash_redboot(struct be_adapter *adapter,
50320 + const u8 *p, u32 img_start, int image_size,
50321 + int hdr_size)
50322 +{
50323 + u32 crc_offset;
50324 + u8 flashed_crc[4];
50325 + int status;
50326 +
50327 + crc_offset = hdr_size + img_start + image_size - 4;
50328 +
50329 + p += crc_offset;
50330 +
50331 + status = be_cmd_get_flash_crc(adapter, flashed_crc,
50332 + (image_size - 4));
50333 + if (status) {
50334 + dev_err(&adapter->pdev->dev,
50335 + "could not get crc from flash, not flashing redboot\n");
50336 + return false;
50337 + }
50338 +
50339 + /*update redboot only if crc does not match*/
50340 + if (!memcmp(flashed_crc, p, 4))
50341 + return false;
50342 + else
50343 + return true;
50344 +}
50345 +
50346 +static bool phy_flashing_required(struct be_adapter *adapter)
50347 +{
50348 + int status = 0;
50349 + struct be_phy_info phy_info;
50350 +
50351 + status = be_cmd_get_phy_info(adapter, &phy_info);
50352 + if (status)
50353 + return false;
50354 + if ((phy_info.phy_type == TN_8022) &&
50355 + (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
50356 + return true;
50357 + }
50358 + return false;
50359 +}
50360 +
50361 +static int be_flash_data(struct be_adapter *adapter,
50362 const struct firmware *fw,
50363 - struct be_dma_mem *flash_cmd, u32 flash_type)
50364 + struct be_dma_mem *flash_cmd, int num_of_images)
50365 +
50366 {
50367 - int status;
50368 - u32 flash_op, image_offset = 0, total_bytes, image_size = 0;
50369 + int status = 0, i, filehdr_size = 0;
50370 + u32 total_bytes = 0, flash_op;
50371 int num_bytes;
50372 const u8 *p = fw->data;
50373 struct be_cmd_write_flashrom *req = flash_cmd->va;
50374 + struct flash_comp *pflashcomp;
50375 + int num_comp;
50376
50377 - switch (flash_type) {
50378 - case FLASHROM_TYPE_ISCSI_ACTIVE:
50379 - image_offset = FLASH_iSCSI_PRIMARY_IMAGE_START;
50380 - image_size = FLASH_IMAGE_MAX_SIZE;
50381 - break;
50382 - case FLASHROM_TYPE_ISCSI_BACKUP:
50383 - image_offset = FLASH_iSCSI_BACKUP_IMAGE_START;
50384 - image_size = FLASH_IMAGE_MAX_SIZE;
50385 - break;
50386 - case FLASHROM_TYPE_FCOE_FW_ACTIVE:
50387 - image_offset = FLASH_FCoE_PRIMARY_IMAGE_START;
50388 - image_size = FLASH_IMAGE_MAX_SIZE;
50389 - break;
50390 - case FLASHROM_TYPE_FCOE_FW_BACKUP:
50391 - image_offset = FLASH_FCoE_BACKUP_IMAGE_START;
50392 - image_size = FLASH_IMAGE_MAX_SIZE;
50393 - break;
50394 - case FLASHROM_TYPE_BIOS:
50395 - image_offset = FLASH_iSCSI_BIOS_START;
50396 - image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
50397 - break;
50398 - case FLASHROM_TYPE_FCOE_BIOS:
50399 - image_offset = FLASH_FCoE_BIOS_START;
50400 - image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
50401 - break;
50402 - case FLASHROM_TYPE_PXE_BIOS:
50403 - image_offset = FLASH_PXE_BIOS_START;
50404 - image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
50405 - break;
50406 - default:
50407 - return 0;
50408 + struct flash_comp gen3_flash_types[10] = {
50409 + { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
50410 + FLASH_IMAGE_MAX_SIZE_g3},
50411 + { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
50412 + FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
50413 + { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
50414 + FLASH_BIOS_IMAGE_MAX_SIZE_g3},
50415 + { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
50416 + FLASH_BIOS_IMAGE_MAX_SIZE_g3},
50417 + { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
50418 + FLASH_BIOS_IMAGE_MAX_SIZE_g3},
50419 + { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
50420 + FLASH_IMAGE_MAX_SIZE_g3},
50421 + { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
50422 + FLASH_IMAGE_MAX_SIZE_g3},
50423 + { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
50424 + FLASH_IMAGE_MAX_SIZE_g3},
50425 + { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
50426 + FLASH_NCSI_IMAGE_MAX_SIZE_g3},
50427 + { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
50428 + FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
50429 + };
50430 + struct flash_comp gen2_flash_types[8] = {
50431 + { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
50432 + FLASH_IMAGE_MAX_SIZE_g2},
50433 + { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
50434 + FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
50435 + { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
50436 + FLASH_BIOS_IMAGE_MAX_SIZE_g2},
50437 + { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
50438 + FLASH_BIOS_IMAGE_MAX_SIZE_g2},
50439 + { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
50440 + FLASH_BIOS_IMAGE_MAX_SIZE_g2},
50441 + { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
50442 + FLASH_IMAGE_MAX_SIZE_g2},
50443 + { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
50444 + FLASH_IMAGE_MAX_SIZE_g2},
50445 + { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
50446 + FLASH_IMAGE_MAX_SIZE_g2}
50447 + };
50448 + if (adapter->generation == BE_GEN3) {
50449 + pflashcomp = gen3_flash_types;
50450 + filehdr_size = sizeof(struct flash_file_hdr_g3);
50451 + num_comp = ARRAY_SIZE(gen3_flash_types);
50452 + } else {
50453 + pflashcomp = gen2_flash_types;
50454 + filehdr_size = sizeof(struct flash_file_hdr_g2);
50455 + num_comp = ARRAY_SIZE(gen2_flash_types);
50456 }
50457 + for (i = 0; i < num_comp; i++) {
50458 + if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
50459 + memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
50460 + continue;
50461 + if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
50462 + if (!phy_flashing_required(adapter))
50463 + continue;
50464 + }
50465 + if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
50466 + (!be_flash_redboot(adapter, fw->data,
50467 + pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
50468 + (num_of_images * sizeof(struct image_hdr)))))
50469 + continue;
50470
50471 - p += sizeof(struct flash_file_hdr) + image_offset;
50472 - if (p + image_size > fw->data + fw->size)
50473 - return -1;
50474 -
50475 - total_bytes = image_size;
50476 -
50477 - while (total_bytes) {
50478 - if (total_bytes > 32*1024)
50479 - num_bytes = 32*1024;
50480 - else
50481 - num_bytes = total_bytes;
50482 - total_bytes -= num_bytes;
50483 -
50484 - if (!total_bytes)
50485 - flash_op = FLASHROM_OPER_FLASH;
50486 - else
50487 - flash_op = FLASHROM_OPER_SAVE;
50488 - memcpy(req->params.data_buf, p, num_bytes);
50489 - p += num_bytes;
50490 - status = be_cmd_write_flashrom(adapter, flash_cmd,
50491 - flash_type, flash_op, num_bytes);
50492 - if (status) {
50493 - dev_err(&adapter->pdev->dev,
50494 - "cmd to write to flash rom failed. type/op %d/%d\n",
50495 - flash_type, flash_op);
50496 + p = fw->data;
50497 + p += filehdr_size + pflashcomp[i].offset
50498 + + (num_of_images * sizeof(struct image_hdr));
50499 + if (p + pflashcomp[i].size > fw->data + fw->size)
50500 return -1;
50501 + total_bytes = pflashcomp[i].size;
50502 + while (total_bytes) {
50503 + if (total_bytes > 32*1024)
50504 + num_bytes = 32*1024;
50505 + else
50506 + num_bytes = total_bytes;
50507 + total_bytes -= num_bytes;
50508 + if (!total_bytes) {
50509 + if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
50510 + flash_op = FLASHROM_OPER_PHY_FLASH;
50511 + else
50512 + flash_op = FLASHROM_OPER_FLASH;
50513 + } else {
50514 + if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
50515 + flash_op = FLASHROM_OPER_PHY_SAVE;
50516 + else
50517 + flash_op = FLASHROM_OPER_SAVE;
50518 + }
50519 + memcpy(req->params.data_buf, p, num_bytes);
50520 + p += num_bytes;
50521 + status = be_cmd_write_flashrom(adapter, flash_cmd,
50522 + pflashcomp[i].optype, flash_op, num_bytes);
50523 + if (status) {
50524 + if ((status == ILLEGAL_IOCTL_REQ) &&
50525 + (pflashcomp[i].optype ==
50526 + IMG_TYPE_PHY_FW))
50527 + break;
50528 + dev_err(&adapter->pdev->dev,
50529 + "cmd to write to flash rom failed.\n");
50530 + return -1;
50531 + }
50532 + yield();
50533 }
50534 - yield();
50535 }
50536 -
50537 return 0;
50538 }
50539
50540 +static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
50541 +{
50542 + if (fhdr == NULL)
50543 + return 0;
50544 + if (fhdr->build[0] == '3')
50545 + return BE_GEN3;
50546 + else if (fhdr->build[0] == '2')
50547 + return BE_GEN2;
50548 + else
50549 + return 0;
50550 +}
50551 +
50552 int be_load_fw(struct be_adapter *adapter, u8 *func)
50553 {
50554 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
50555 const struct firmware *fw;
50556 - struct flash_file_hdr *fhdr;
50557 - struct flash_section_info *fsec = NULL;
50558 + struct flash_file_hdr_g2 *fhdr;
50559 + struct flash_file_hdr_g3 *fhdr3;
50560 + struct image_hdr *img_hdr_ptr = NULL;
50561 struct be_dma_mem flash_cmd;
50562 - int status;
50563 + int status, i = 0, num_imgs = 0;
50564 const u8 *p;
50565 - bool entry_found = false;
50566 - int flash_type;
50567 - char fw_ver[FW_VER_LEN];
50568 - char fw_cfg;
50569
50570 - status = be_cmd_get_fw_ver(adapter, fw_ver);
50571 - if (status)
50572 - return status;
50573 + if (!netif_running(adapter->netdev)) {
50574 + dev_err(&adapter->pdev->dev,
50575 + "Firmware load not allowed (interface is down)\n");
50576 + return -1;
50577 + }
50578
50579 - fw_cfg = *(fw_ver + 2);
50580 - if (fw_cfg == '0')
50581 - fw_cfg = '1';
50582 strcpy(fw_file, func);
50583
50584 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
50585 @@ -1826,34 +3202,9 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
50586 goto fw_exit;
50587
50588 p = fw->data;
50589 - fhdr = (struct flash_file_hdr *) p;
50590 - if (memcmp(fhdr->sign, FW_FILE_HDR_SIGN, strlen(FW_FILE_HDR_SIGN))) {
50591 - dev_err(&adapter->pdev->dev,
50592 - "Firmware(%s) load error (signature did not match)\n",
50593 - fw_file);
50594 - status = -1;
50595 - goto fw_exit;
50596 - }
50597 -
50598 + fhdr = (struct flash_file_hdr_g2 *) p;
50599 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
50600
50601 - p += sizeof(struct flash_file_hdr);
50602 - while (p < (fw->data + fw->size)) {
50603 - fsec = (struct flash_section_info *)p;
50604 - if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) {
50605 - entry_found = true;
50606 - break;
50607 - }
50608 - p += 32;
50609 - }
50610 -
50611 - if (!entry_found) {
50612 - status = -1;
50613 - dev_err(&adapter->pdev->dev,
50614 - "Flash cookie not found in firmware image\n");
50615 - goto fw_exit;
50616 - }
50617 -
50618 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
50619 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
50620 &flash_cmd.dma);
50621 @@ -1864,12 +3215,25 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
50622 goto fw_exit;
50623 }
50624
50625 - for (flash_type = FLASHROM_TYPE_ISCSI_ACTIVE;
50626 - flash_type <= FLASHROM_TYPE_FCOE_FW_BACKUP; flash_type++) {
50627 - status = be_flash_image(adapter, fw, &flash_cmd,
50628 - flash_type);
50629 - if (status)
50630 - break;
50631 + if ((adapter->generation == BE_GEN3) &&
50632 + (get_ufigen_type(fhdr) == BE_GEN3)) {
50633 + fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
50634 + num_imgs = le32_to_cpu(fhdr3->num_imgs);
50635 + for (i = 0; i < num_imgs; i++) {
50636 + img_hdr_ptr = (struct image_hdr *) (fw->data +
50637 + (sizeof(struct flash_file_hdr_g3) +
50638 + i * sizeof(struct image_hdr)));
50639 + if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
50640 + status = be_flash_data(adapter, fw, &flash_cmd,
50641 + num_imgs);
50642 + }
50643 + } else if ((adapter->generation == BE_GEN2) &&
50644 + (get_ufigen_type(fhdr) == BE_GEN2)) {
50645 + status = be_flash_data(adapter, fw, &flash_cmd, 0);
50646 + } else {
50647 + dev_err(&adapter->pdev->dev,
50648 + "UFI and Interface are not compatible for flashing\n");
50649 + status = -1;
50650 }
50651
50652 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
50653 @@ -1879,14 +3243,14 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
50654 goto fw_exit;
50655 }
50656
50657 - dev_info(&adapter->pdev->dev, "Firmware flashed succesfully\n");
50658 + dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
50659
50660 fw_exit:
50661 release_firmware(fw);
50662 return status;
50663 }
50664
50665 -static struct net_device_ops be_netdev_ops = {
50666 +static net_device_ops_no_const be_netdev_ops = {
50667 .ndo_open = be_open,
50668 .ndo_stop = be_close,
50669 .ndo_start_xmit = be_xmit,
50670 @@ -1898,15 +3262,32 @@ static struct net_device_ops be_netdev_ops = {
50671 .ndo_vlan_rx_register = be_vlan_register,
50672 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
50673 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
50674 +#ifdef HAVE_SRIOV_CONFIG
50675 + .ndo_set_vf_mac = be_set_vf_mac,
50676 + .ndo_set_vf_vlan = be_set_vf_vlan,
50677 + .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
50678 + .ndo_get_vf_config = be_get_vf_config,
50679 +#endif
50680 + .ndo_do_ioctl = be_do_ioctl,
50681 +#ifdef CONFIG_NET_POLL_CONTROLLER
50682 + .ndo_poll_controller = be_netpoll,
50683 +#endif
50684 };
50685
50686 -static void be_netdev_init(struct net_device *netdev)
50687 +static int be_netdev_init(struct net_device *netdev)
50688 {
50689 struct be_adapter *adapter = netdev_priv(netdev);
50690 + struct be_rx_obj *rxo;
50691 + int i, status = 0;
50692
50693 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
50694 - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
50695 - NETIF_F_GRO;
50696 + NETIF_F_HW_VLAN_TX | NETIF_F_HW_CSUM | NETIF_F_TSO6;
50697 +
50698 + netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
50699 + NETIF_F_HW_CSUM;
50700 +
50701 + netdev->features |= NETIF_F_VLAN_SG | NETIF_F_VLAN_TSO |
50702 + NETIF_F_VLAN_CSUM;
50703
50704 netdev->flags |= IFF_MULTICAST;
50705
50706 @@ -1918,17 +3299,30 @@ static void be_netdev_init(struct net_device *netdev)
50707
50708 netif_set_gso_max_size(netdev, 65535);
50709
50710 + if (adapter->flags & BE_FLAGS_DCBX)
50711 + be_netdev_ops.ndo_select_queue = be_select_queue;
50712 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
50713 -
50714 +
50715 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
50716
50717 - netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
50718 - BE_NAPI_WEIGHT);
50719 - netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
50720 + be_lro_init(adapter, netdev);
50721 +
50722 + for_all_rx_queues(adapter, rxo, i) {
50723 + status = be_netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
50724 + BE_NAPI_WEIGHT);
50725 + if (status) {
50726 + dev_err(&adapter->pdev->dev, "dummy netdev alloc fail"
50727 + "for rxo:%d\n", i);
50728 + return status;
50729 + }
50730 + }
50731 + status = be_netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
50732 BE_NAPI_WEIGHT);
50733 + if (status)
50734 + dev_err(&adapter->pdev->dev, "dummy netdev alloc fail"
50735 + "for tx\n");
50736
50737 - netif_carrier_off(netdev);
50738 - netif_stop_queue(netdev);
50739 + return status;
50740 }
50741
50742 static void be_unmap_pci_bars(struct be_adapter *adapter)
50743 @@ -1937,37 +3331,62 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
50744 iounmap(adapter->csr);
50745 if (adapter->db)
50746 iounmap(adapter->db);
50747 - if (adapter->pcicfg)
50748 + if (adapter->pcicfg && be_physfn(adapter))
50749 iounmap(adapter->pcicfg);
50750 }
50751
50752 static int be_map_pci_bars(struct be_adapter *adapter)
50753 {
50754 + struct pci_dev *pdev = adapter->pdev;
50755 u8 __iomem *addr;
50756 - int pcicfg_reg;
50757 + int pcicfg_reg, db_reg;
50758
50759 - addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
50760 - pci_resource_len(adapter->pdev, 2));
50761 - if (addr == NULL)
50762 - return -ENOMEM;
50763 - adapter->csr = addr;
50764 + if (lancer_chip(adapter)) {
50765 + addr = ioremap_nocache(pci_resource_start(pdev, 0),
50766 + pci_resource_len(adapter->pdev, 0));
50767 + if (addr == NULL)
50768 + return -ENOMEM;
50769 + adapter->db = addr;
50770 + return 0;
50771 + }
50772
50773 - addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
50774 - 128 * 1024);
50775 - if (addr == NULL)
50776 - goto pci_map_err;
50777 - adapter->db = addr;
50778 + if (be_physfn(adapter)) {
50779 + addr = ioremap_nocache(pci_resource_start(pdev, 2),
50780 + pci_resource_len(pdev, 2));
50781 + if (addr == NULL)
50782 + return -ENOMEM;
50783 + adapter->csr = addr;
50784 + adapter->netdev->mem_start = pci_resource_start(pdev, 2);
50785 + adapter->netdev->mem_end = pci_resource_start(pdev, 2) +
50786 + pci_resource_len(pdev, 2);
50787 + }
50788
50789 - if (adapter->generation == BE_GEN2)
50790 + if (adapter->generation == BE_GEN2) {
50791 pcicfg_reg = 1;
50792 - else
50793 + db_reg = 4;
50794 + } else {
50795 pcicfg_reg = 0;
50796 + if (be_physfn(adapter))
50797 + db_reg = 4;
50798 + else
50799 + db_reg = 0;
50800 + }
50801
50802 - addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
50803 - pci_resource_len(adapter->pdev, pcicfg_reg));
50804 + addr = ioremap_nocache(pci_resource_start(pdev, db_reg),
50805 + pci_resource_len(pdev, db_reg));
50806 if (addr == NULL)
50807 goto pci_map_err;
50808 - adapter->pcicfg = addr;
50809 + adapter->db = addr;
50810 +
50811 + if (be_physfn(adapter)) {
50812 + addr = ioremap_nocache(
50813 + pci_resource_start(pdev, pcicfg_reg),
50814 + pci_resource_len(pdev, pcicfg_reg));
50815 + if (addr == NULL)
50816 + goto pci_map_err;
50817 + adapter->pcicfg = addr;
50818 + } else
50819 + adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
50820
50821 return 0;
50822 pci_map_err:
50823 @@ -1985,40 +3404,69 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
50824 if (mem->va)
50825 pci_free_consistent(adapter->pdev, mem->size,
50826 mem->va, mem->dma);
50827 +
50828 + mem = &adapter->rx_filter;
50829 + if (mem->va)
50830 + pci_free_consistent(adapter->pdev, mem->size,
50831 + mem->va, mem->dma);
50832 }
50833
50834 static int be_ctrl_init(struct be_adapter *adapter)
50835 {
50836 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
50837 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
50838 + struct be_dma_mem *rx_filter = &adapter->rx_filter;
50839 int status;
50840
50841 status = be_map_pci_bars(adapter);
50842 if (status)
50843 - return status;
50844 + goto done;
50845
50846 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
50847 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
50848 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
50849 if (!mbox_mem_alloc->va) {
50850 - be_unmap_pci_bars(adapter);
50851 - return -1;
50852 + status = -ENOMEM;
50853 + goto unmap_pci_bars;
50854 }
50855 +
50856 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
50857 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
50858 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
50859 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
50860 - spin_lock_init(&adapter->mbox_lock);
50861 +
50862 + rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
50863 + rx_filter->va = pci_alloc_consistent(adapter->pdev, rx_filter->size,
50864 + &rx_filter->dma);
50865 + if (rx_filter->va == NULL) {
50866 + status = -ENOMEM;
50867 + goto free_mbox;
50868 + }
50869 + memset(rx_filter->va, 0, rx_filter->size);
50870 +
50871 + mutex_init(&adapter->mbox_lock);
50872 spin_lock_init(&adapter->mcc_lock);
50873 spin_lock_init(&adapter->mcc_cq_lock);
50874
50875 + init_completion(&adapter->flash_compl);
50876 +
50877 + PCI_SAVE_STATE(adapter->pdev);
50878 return 0;
50879 +
50880 +free_mbox:
50881 + pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
50882 + mbox_mem_alloc->va, mbox_mem_alloc->dma);
50883 +
50884 +unmap_pci_bars:
50885 + be_unmap_pci_bars(adapter);
50886 +
50887 +done:
50888 + return status;
50889 }
50890
50891 static void be_stats_cleanup(struct be_adapter *adapter)
50892 {
50893 - struct be_stats_obj *stats = &adapter->stats;
50894 - struct be_dma_mem *cmd = &stats->cmd;
50895 + struct be_dma_mem *cmd = &adapter->stats_cmd;
50896
50897 if (cmd->va)
50898 pci_free_consistent(adapter->pdev, cmd->size,
50899 @@ -2027,10 +3475,12 @@ static void be_stats_cleanup(struct be_adapter *adapter)
50900
50901 static int be_stats_init(struct be_adapter *adapter)
50902 {
50903 - struct be_stats_obj *stats = &adapter->stats;
50904 - struct be_dma_mem *cmd = &stats->cmd;
50905 + struct be_dma_mem *cmd = &adapter->stats_cmd;
50906
50907 - cmd->size = sizeof(struct be_cmd_req_get_stats);
50908 + if (adapter->generation == BE_GEN2)
50909 + cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
50910 + else
50911 + cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
50912 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
50913 if (cmd->va == NULL)
50914 return -1;
50915 @@ -2041,9 +3491,17 @@ static int be_stats_init(struct be_adapter *adapter)
50916 static void __devexit be_remove(struct pci_dev *pdev)
50917 {
50918 struct be_adapter *adapter = pci_get_drvdata(pdev);
50919 +
50920 if (!adapter)
50921 return;
50922
50923 + cancel_delayed_work_sync(&adapter->work);
50924 +
50925 +#ifdef CONFIG_PALAU
50926 + be_sysfs_remove_group(adapter);
50927 +#endif
50928 +
50929 + /* be_close() gets called if the device is open by unregister */
50930 unregister_netdev(adapter->netdev);
50931
50932 be_clear(adapter);
50933 @@ -2052,36 +3510,203 @@ static void __devexit be_remove(struct pci_dev *pdev)
50934
50935 be_ctrl_cleanup(adapter);
50936
50937 - if (adapter->msix_enabled) {
50938 - pci_disable_msix(adapter->pdev);
50939 - adapter->msix_enabled = false;
50940 - }
50941 + kfree(adapter->vf_cfg);
50942 + be_sriov_disable(adapter);
50943 +
50944 + be_msix_disable(adapter);
50945
50946 pci_set_drvdata(pdev, NULL);
50947 pci_release_regions(pdev);
50948 pci_disable_device(pdev);
50949 -
50950 + be_netif_napi_del(adapter->netdev);
50951 free_netdev(adapter->netdev);
50952 }
50953
50954 -static int be_hw_up(struct be_adapter *adapter)
50955 +static void be_pcie_slot_check(struct be_adapter *adapter)
50956 +{
50957 + u32 curr, max, width, max_wd, speed, max_sp;
50958 +
50959 + pci_read_config_dword(adapter->pdev, PCICFG_PCIE_LINK_STATUS_OFFSET,
50960 + &curr);
50961 + width = (curr >> PCIE_LINK_STATUS_NEG_WIDTH_SHIFT) &
50962 + PCIE_LINK_STATUS_NEG_WIDTH_MASK;
50963 + speed = (curr >> PCIE_LINK_STATUS_SPEED_SHIFT) &
50964 + PCIE_LINK_STATUS_SPEED_MASK;
50965 +
50966 + pci_read_config_dword(adapter->pdev, PCICFG_PCIE_LINK_CAP_OFFSET,
50967 + &max);
50968 + max_wd = (max >> PCIE_LINK_CAP_MAX_WIDTH_SHIFT) &
50969 + PCIE_LINK_CAP_MAX_WIDTH_MASK;
50970 + max_sp = (max >> PCIE_LINK_CAP_MAX_SPEED_SHIFT) &
50971 + PCIE_LINK_CAP_MAX_SPEED_MASK;
50972 +
50973 + if (width < max_wd || speed < max_sp)
50974 + dev_warn(&adapter->pdev->dev,
50975 + "Found network device in a Gen%s x%d PCIe slot. It "
50976 + "should be in a Gen2 x%d slot for best performance\n",
50977 + speed < max_sp ? "1" : "2", width, max_wd);
50978 +}
50979 +
50980 +static int be_get_ioctl_version(char *fw_version) {
50981 + char *str[4];
50982 + int i;
50983 + int val[4];
50984 + char *endptr;
50985 +
50986 + if(!fw_version)
50987 + return 0;
50988 + for(i=0; i<3; i++) {
50989 + str[i] = strsep(&fw_version, ".");
50990 + val[i] = simple_strtol(str[i], &endptr, 10);
50991 + }
50992 +
50993 + if (val[0]>4 || (val[0]>3 && val[2]>143))
50994 + return 1;
50995 + return 0;
50996 +}
50997 +
50998 +static int be_get_port_names(struct be_adapter *adapter)
50999 {
51000 int status;
51001 + int ver;
51002
51003 - status = be_cmd_POST(adapter);
51004 + status = be_cmd_get_fw_ver(adapter,
51005 + adapter->fw_ver, NULL);
51006 if (status)
51007 return status;
51008 + ver = be_get_ioctl_version(adapter->fw_ver);
51009 + if (ver && (adapter->generation == BE_GEN3))
51010 + status = be_cmd_query_port_names_v1(adapter,
51011 + adapter->port_name);
51012 + else
51013 + status = be_cmd_query_port_names_v0(adapter,
51014 + adapter->port_name);
51015 + return status;
51016 +}
51017
51018 - status = be_cmd_reset_function(adapter);
51019 +static int be_get_config(struct be_adapter *adapter)
51020 +{
51021 + int status;
51022 + u8 mac[ETH_ALEN];
51023 +
51024 + status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
51025 + &adapter->function_mode,
51026 + &adapter->function_caps);
51027 if (status)
51028 return status;
51029
51030 - status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
51031 + status = be_cmd_get_cntl_attributes(adapter);
51032 if (status)
51033 return status;
51034
51035 - status = be_cmd_query_fw_cfg(adapter,
51036 - &adapter->port_num, &adapter->cap);
51037 + memset(mac, 0, ETH_ALEN);
51038 + be_pcie_slot_check(adapter);
51039 +
51040 + if (be_physfn(adapter)) {
51041 + status = be_cmd_mac_addr_query(adapter, mac,
51042 + MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
51043 +
51044 + if (status)
51045 + return status;
51046 +
51047 + if (!is_valid_ether_addr(mac))
51048 + return -EADDRNOTAVAIL;
51049 +
51050 + memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
51051 + memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
51052 + }
51053 +
51054 + if (adapter->function_mode & FLEX10_MODE)
51055 + adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
51056 + else
51057 + adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
51058 +
51059 + return 0;
51060 +}
51061 +
51062 +static int be_dev_family_check(struct be_adapter *adapter)
51063 +{
51064 + struct pci_dev *pdev = adapter->pdev;
51065 + u32 sli_intf = 0, if_type;
51066 +
51067 + switch (pdev->device) {
51068 + case BE_DEVICE_ID1:
51069 + case OC_DEVICE_ID1:
51070 + adapter->generation = BE_GEN2;
51071 + break;
51072 + case BE_DEVICE_ID2:
51073 + case OC_DEVICE_ID2:
51074 + adapter->generation = BE_GEN3;
51075 + break;
51076 + case OC_DEVICE_ID3:
51077 + pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
51078 + if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
51079 + SLI_INTF_IF_TYPE_SHIFT;
51080 +
51081 + if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
51082 + if_type != 0x02) {
51083 + dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
51084 + return -EINVAL;
51085 + }
51086 + if (num_vfs > 0) {
51087 + dev_err(&pdev->dev, "VFs not supported\n");
51088 + return -EINVAL;
51089 + }
51090 + adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
51091 + SLI_INTF_FAMILY_SHIFT);
51092 + adapter->generation = BE_GEN3;
51093 + break;
51094 + default:
51095 + adapter->generation = 0;
51096 + }
51097 + return 0;
51098 +}
51099 +
51100 +static int lancer_wait_ready(struct be_adapter *adapter)
51101 +{
51102 +#define SLIPORT_READY_TIMEOUT 500
51103 + u32 sliport_status;
51104 + int status = 0, i;
51105 +
51106 + for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
51107 + sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
51108 + if (sliport_status & SLIPORT_STATUS_RDY_MASK)
51109 + break;
51110 +
51111 + msleep(20);
51112 + }
51113 +
51114 + if (i == SLIPORT_READY_TIMEOUT)
51115 + status = -1;
51116 +
51117 + return status;
51118 +}
51119 +
51120 +static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
51121 +{
51122 + int status;
51123 + u32 sliport_status, err, reset_needed;
51124 + status = lancer_wait_ready(adapter);
51125 + if (!status) {
51126 + sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
51127 + err = sliport_status & SLIPORT_STATUS_ERR_MASK;
51128 + reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
51129 + if (err && reset_needed) {
51130 + iowrite32(SLI_PORT_CONTROL_IP_MASK,
51131 + adapter->db + SLIPORT_CONTROL_OFFSET);
51132 +
51133 + /* check adapter has corrected the error */
51134 + status = lancer_wait_ready(adapter);
51135 + sliport_status = ioread32(adapter->db +
51136 + SLIPORT_STATUS_OFFSET);
51137 + sliport_status &= (SLIPORT_STATUS_ERR_MASK |
51138 + SLIPORT_STATUS_RN_MASK);
51139 + if (status || sliport_status)
51140 + status = -1;
51141 + } else if (err || reset_needed) {
51142 + status = -1;
51143 + }
51144 + }
51145 return status;
51146 }
51147
51148 @@ -2091,7 +3716,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
51149 int status = 0;
51150 struct be_adapter *adapter;
51151 struct net_device *netdev;
51152 - u8 mac[ETH_ALEN];
51153 + u32 en;
51154
51155 status = pci_enable_device(pdev);
51156 if (status)
51157 @@ -2102,31 +3727,22 @@ static int __devinit be_probe(struct pci_dev *pdev,
51158 goto disable_dev;
51159 pci_set_master(pdev);
51160
51161 - netdev = alloc_etherdev(sizeof(struct be_adapter));
51162 + netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
51163 if (netdev == NULL) {
51164 status = -ENOMEM;
51165 goto rel_reg;
51166 }
51167 adapter = netdev_priv(netdev);
51168
51169 - switch (pdev->device) {
51170 - case BE_DEVICE_ID1:
51171 - case OC_DEVICE_ID1:
51172 - adapter->generation = BE_GEN2;
51173 - break;
51174 - case BE_DEVICE_ID2:
51175 - case OC_DEVICE_ID2:
51176 - adapter->generation = BE_GEN3;
51177 - break;
51178 - default:
51179 - adapter->generation = 0;
51180 - }
51181 -
51182 adapter->pdev = pdev;
51183 +
51184 + status = be_dev_family_check(adapter);
51185 + if (status)
51186 + goto free_netdev;
51187 +
51188 pci_set_drvdata(pdev, adapter);
51189 adapter->netdev = netdev;
51190 -
51191 - be_msix_enable(adapter);
51192 + SET_NETDEV_DEV(netdev, &pdev->dev);
51193
51194 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
51195 if (!status) {
51196 @@ -2139,46 +3755,150 @@ static int __devinit be_probe(struct pci_dev *pdev,
51197 }
51198 }
51199
51200 + be_sriov_enable(adapter);
51201 + if (adapter->num_vfs > 0) {
51202 + adapter->vf_cfg = kcalloc(adapter->num_vfs,
51203 + sizeof(struct be_vf_cfg), GFP_KERNEL);
51204 +
51205 + if (!adapter->vf_cfg)
51206 + goto free_netdev;
51207 + }
51208 +
51209 status = be_ctrl_init(adapter);
51210 if (status)
51211 - goto free_netdev;
51212 + goto free_vf_cfg;
51213 +
51214 + if (lancer_chip(adapter)) {
51215 + status = lancer_test_and_set_rdy_state(adapter);
51216 + if (status) {
51217 + dev_err(&pdev->dev, "Adapter in non recoverable error\n");
51218 + goto ctrl_clean;
51219 + }
51220 + }
51221 +
51222 + /* sync up with fw's ready state */
51223 + if (be_physfn(adapter)) {
51224 + status = be_cmd_POST(adapter);
51225 + if (status)
51226 + goto ctrl_clean;
51227 + }
51228 +
51229 + /* tell fw we're ready to fire cmds */
51230 + status = be_cmd_fw_init(adapter);
51231 + if (status)
51232 + goto ctrl_clean;
51233 +
51234 + status = be_cmd_reset_function(adapter);
51235 + if (status)
51236 + goto ctrl_clean;
51237
51238 status = be_stats_init(adapter);
51239 if (status)
51240 goto ctrl_clean;
51241
51242 - status = be_hw_up(adapter);
51243 + status = be_get_config(adapter);
51244 if (status)
51245 goto stats_clean;
51246
51247 - status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
51248 - true /* permanent */, 0);
51249 - if (status)
51250 - goto stats_clean;
51251 - memcpy(netdev->dev_addr, mac, ETH_ALEN);
51252 + /* This bit is zero in normal boot case, but in crash kernel case this
51253 + is not cleared. clear this bit here, until we are ready with the irqs
51254 + i.e in be_open call.*/
51255 + if (!lancer_chip(adapter))
51256 + be_intr_set(adapter, false);
51257 +
51258 + if (msix)
51259 + be_msix_enable(adapter);
51260
51261 INIT_DELAYED_WORK(&adapter->work, be_worker);
51262 - be_netdev_init(netdev);
51263 - SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
51264
51265 status = be_setup(adapter);
51266 if (status)
51267 - goto stats_clean;
51268 + goto msix_disable;
51269 +
51270 + /* Initilize the link status to -1 */
51271 + adapter->link_status = -1;
51272 +
51273 + status = be_netdev_init(netdev);
51274 + if (status)
51275 + goto unsetup;
51276 +
51277 status = register_netdev(netdev);
51278 if (status != 0)
51279 goto unsetup;
51280
51281 - dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
51282 + be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
51283 +
51284 + if (be_physfn(adapter) && adapter->num_vfs) {
51285 + u8 mac_speed;
51286 + int link_status;
51287 + u16 def_vlan, vf, lnk_speed;
51288 +
51289 + status = be_vf_eth_addr_config(adapter);
51290 + if (status)
51291 + goto unreg_netdev;
51292 +
51293 + for (vf = 0; vf < adapter->num_vfs; vf++) {
51294 + status = be_cmd_get_hsw_config(adapter, &def_vlan,
51295 + vf + 1, adapter->vf_cfg[vf].vf_if_handle);
51296 + if (!status)
51297 + adapter->vf_cfg[vf].vf_def_vid = def_vlan;
51298 + else
51299 + goto unreg_netdev;
51300 +
51301 + status = be_cmd_link_status_query(adapter, &link_status,
51302 + &mac_speed, &lnk_speed, vf + 1);
51303 + if (!status)
51304 + adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
51305 + else
51306 + goto unreg_netdev;
51307 + }
51308 + }
51309 + if (be_physfn(adapter)) {
51310 + /* Temp fix ofr bug# 23034. Till ARM
51311 + * f/w fixes privilege lvl */
51312 + be_get_port_names(adapter);
51313 + }
51314 +
51315 + /* Enable Vlan capability based on privileges.
51316 + * PF will have Vlan capability anyway. */
51317 + be_cmd_get_fn_privileges(adapter, &en, 0);
51318 +
51319 + if ((en & (BE_PRIV_FILTMGMT | BE_PRIV_VHADM | BE_PRIV_DEVCFG)) ||
51320 + be_physfn(adapter))
51321 + netdev->features |= NETIF_F_HW_VLAN_FILTER;
51322 + else
51323 + netdev->features |= NETIF_F_VLAN_CHALLENGED;
51324 +
51325 + dev_info(&pdev->dev, "%s: numa node %d\n", netdev->name,
51326 + dev_to_node(&pdev->dev));
51327 + dev_info(&pdev->dev, "%s %s \"%s\" port %d\n", nic_name(pdev),
51328 + (adapter->port_num > 1 ? "1Gbps NIC" : "10Gbps NIC"),
51329 + adapter->model_number, adapter->hba_port_num);
51330 +
51331 +
51332 +#ifdef CONFIG_PALAU
51333 + be_sysfs_create_group(adapter);
51334 +#endif
51335 + schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
51336 return 0;
51337
51338 +unreg_netdev:
51339 + unregister_netdev(netdev);
51340 unsetup:
51341 be_clear(adapter);
51342 +msix_disable:
51343 + be_msix_disable(adapter);
51344 stats_clean:
51345 be_stats_cleanup(adapter);
51346 ctrl_clean:
51347 be_ctrl_cleanup(adapter);
51348 +free_vf_cfg:
51349 + kfree(adapter->vf_cfg);
51350 free_netdev:
51351 - free_netdev(adapter->netdev);
51352 + be_sriov_disable(adapter);
51353 + be_netif_napi_del(netdev);
51354 + free_netdev(netdev);
51355 + pci_set_drvdata(pdev, NULL);
51356 rel_reg:
51357 pci_release_regions(pdev);
51358 disable_dev:
51359 @@ -2193,6 +3913,10 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
51360 struct be_adapter *adapter = pci_get_drvdata(pdev);
51361 struct net_device *netdev = adapter->netdev;
51362
51363 + cancel_delayed_work_sync(&adapter->work);
51364 + if (adapter->wol)
51365 + be_setup_wol(adapter, true);
51366 +
51367 netif_device_detach(netdev);
51368 if (netif_running(netdev)) {
51369 rtnl_lock();
51370 @@ -2202,6 +3926,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
51371 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
51372 be_clear(adapter);
51373
51374 + be_msix_disable(adapter);
51375 pci_save_state(pdev);
51376 pci_disable_device(pdev);
51377 pci_set_power_state(pdev, pci_choose_state(pdev, state));
51378 @@ -2223,6 +3948,12 @@ static int be_resume(struct pci_dev *pdev)
51379 pci_set_power_state(pdev, 0);
51380 pci_restore_state(pdev);
51381
51382 + be_msix_enable(adapter);
51383 + /* tell fw we're ready to fire cmds */
51384 + status = be_cmd_fw_init(adapter);
51385 + if (status)
51386 + return status;
51387 +
51388 be_setup(adapter);
51389 if (netif_running(netdev)) {
51390 rtnl_lock();
51391 @@ -2230,28 +3961,152 @@ static int be_resume(struct pci_dev *pdev)
51392 rtnl_unlock();
51393 }
51394 netif_device_attach(netdev);
51395 +
51396 + if (adapter->wol)
51397 + be_setup_wol(adapter, false);
51398 +
51399 + schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
51400 return 0;
51401 }
51402
51403 +/*
51404 + * An FLR will stop BE from DMAing any data.
51405 + */
51406 +static void be_shutdown(struct pci_dev *pdev)
51407 +{
51408 + struct be_adapter *adapter = pci_get_drvdata(pdev);
51409 +
51410 + if (!adapter)
51411 + return;
51412 +
51413 + cancel_delayed_work_sync(&adapter->work);
51414 +
51415 + netif_device_detach(adapter->netdev);
51416 +
51417 + if (adapter->wol)
51418 + be_setup_wol(adapter, true);
51419 +
51420 + be_cmd_reset_function(adapter);
51421 +
51422 + pci_disable_device(pdev);
51423 +}
51424 +
51425 +static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
51426 + pci_channel_state_t state)
51427 +{
51428 + struct be_adapter *adapter = pci_get_drvdata(pdev);
51429 + struct net_device *netdev = adapter->netdev;
51430 +
51431 + dev_err(&adapter->pdev->dev, "EEH error detected\n");
51432 +
51433 + adapter->eeh_err = true;
51434 +
51435 + netif_device_detach(netdev);
51436 +
51437 + if (netif_running(netdev)) {
51438 + rtnl_lock();
51439 + be_close(netdev);
51440 + rtnl_unlock();
51441 + }
51442 + be_clear(adapter);
51443 +
51444 + if (state == pci_channel_io_perm_failure)
51445 + return PCI_ERS_RESULT_DISCONNECT;
51446 +
51447 + pci_disable_device(pdev);
51448 +
51449 + return PCI_ERS_RESULT_NEED_RESET;
51450 +}
51451 +
51452 +static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
51453 +{
51454 + struct be_adapter *adapter = pci_get_drvdata(pdev);
51455 + int status;
51456 +
51457 + dev_info(&adapter->pdev->dev, "EEH reset\n");
51458 + adapter->eeh_err = false;
51459 +
51460 + status = pci_enable_device(pdev);
51461 + if (status)
51462 + return PCI_ERS_RESULT_DISCONNECT;
51463 +
51464 + pci_set_master(pdev);
51465 + pci_set_power_state(pdev, 0);
51466 + pci_restore_state(pdev);
51467 +
51468 + /* Check if card is ok and fw is ready */
51469 + status = be_cmd_POST(adapter);
51470 + if (status)
51471 + return PCI_ERS_RESULT_DISCONNECT;
51472 +
51473 + return PCI_ERS_RESULT_RECOVERED;
51474 +}
51475 +
51476 +static void be_eeh_resume(struct pci_dev *pdev)
51477 +{
51478 + int status = 0;
51479 + struct be_adapter *adapter = pci_get_drvdata(pdev);
51480 + struct net_device *netdev = adapter->netdev;
51481 +
51482 + dev_info(&adapter->pdev->dev, "EEH resume\n");
51483 +
51484 + pci_save_state(pdev);
51485 +
51486 + /* tell fw we're ready to fire cmds */
51487 + status = be_cmd_fw_init(adapter);
51488 + if (status)
51489 + goto err;
51490 +
51491 + status = be_setup(adapter);
51492 + if (status)
51493 + goto err;
51494 +
51495 + if (netif_running(netdev)) {
51496 + status = be_open(netdev);
51497 + if (status)
51498 + goto err;
51499 + }
51500 + netif_device_attach(netdev);
51501 + return;
51502 +err:
51503 + dev_err(&adapter->pdev->dev, "EEH resume failed\n");
51504 + return;
51505 +}
51506 +
51507 +static struct pci_error_handlers be_eeh_handlers = {
51508 + .error_detected = be_eeh_err_detected,
51509 + .slot_reset = be_eeh_reset,
51510 + .resume = be_eeh_resume,
51511 +};
51512 +
51513 static struct pci_driver be_driver = {
51514 .name = DRV_NAME,
51515 .id_table = be_dev_ids,
51516 .probe = be_probe,
51517 .remove = be_remove,
51518 .suspend = be_suspend,
51519 - .resume = be_resume
51520 + .resume = be_resume,
51521 + .shutdown = be_shutdown,
51522 + .err_handler = &be_eeh_handlers
51523 };
51524
51525 static int __init be_init_module(void)
51526 {
51527 - if (rx_frag_size != 8192 && rx_frag_size != 4096
51528 - && rx_frag_size != 2048) {
51529 + if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
51530 + rx_frag_size != 2048) {
51531 printk(KERN_WARNING DRV_NAME
51532 " : Module param rx_frag_size must be 2048/4096/8192."
51533 " Using 2048\n");
51534 rx_frag_size = 2048;
51535 }
51536
51537 + if (!msix && num_vfs > 0) {
51538 + printk(KERN_WARNING DRV_NAME
51539 + " : MSIx required for num_vfs > 0. Ignoring msix=0\n");
51540 + msix = 1;
51541 + }
51542 +
51543 +
51544 return pci_register_driver(&be_driver);
51545 }
51546 module_init(be_init_module);
51547 diff --git a/drivers/net/benet/be_misc.c b/drivers/net/benet/be_misc.c
51548 new file mode 100644
51549 index 0000000..4ab499f
51550 --- /dev/null
51551 +++ b/drivers/net/benet/be_misc.c
51552 @@ -0,0 +1,106 @@
51553 +/*
51554 + * Copyright (C) 2005 - 2011 Emulex
51555 + * All rights reserved.
51556 + *
51557 + * This program is free software; you can redistribute it and/or
51558 + * modify it under the terms of the GNU General Public License version 2
51559 + * as published by the Free Software Foundation. The full GNU General
51560 + * Public License is included in this distribution in the file called COPYING.
51561 + *
51562 + * Contact Information:
51563 + * linux-drivers@emulex.com
51564 + *
51565 + * Emulex
51566 + * 3333 Susan Street
51567 + * Costa Mesa, CA 92626
51568 + */
51569 +#include "be.h"
51570 +
51571 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
51572 +static ssize_t
51573 +flash_fw_store(struct class_device *cd, const char *buf, size_t len)
51574 +{
51575 + struct be_adapter *adapter =
51576 + netdev_priv(container_of(cd, struct net_device, class_dev));
51577 + char file_name[ETHTOOL_FLASH_MAX_FILENAME];
51578 + int status;
51579 +
51580 + if (!capable(CAP_NET_ADMIN))
51581 + return -EPERM;
51582 +
51583 + file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
51584 + strncpy(file_name, buf, (ETHTOOL_FLASH_MAX_FILENAME - 1));
51585 +
51586 + /* Removing new-line char given by sysfs */
51587 + file_name[strlen(file_name) - 1] = '\0';
51588 +
51589 + status = be_load_fw(adapter, file_name);
51590 + if (!status)
51591 + return len;
51592 + else
51593 + return status;
51594 +}
51595 +
51596 +static CLASS_DEVICE_ATTR(flash_fw, S_IWUSR, NULL, flash_fw_store);
51597 +
51598 +static struct attribute *benet_attrs[] = {
51599 + &class_device_attr_flash_fw.attr,
51600 + NULL,
51601 +};
51602 +#else
51603 +
51604 +static ssize_t
51605 +flash_fw_store(struct device *dev, struct device_attribute *attr,
51606 + const char *buf, size_t len)
51607 +{
51608 + struct be_adapter *adapter =
51609 + netdev_priv(container_of(dev, struct net_device, dev));
51610 + char file_name[ETHTOOL_FLASH_MAX_FILENAME];
51611 + int status;
51612 +
51613 + if (!capable(CAP_NET_ADMIN))
51614 + return -EPERM;
51615 +
51616 + file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
51617 + strncpy(file_name, buf, (ETHTOOL_FLASH_MAX_FILENAME - 1));
51618 +
51619 + /* Removing new-line char given by sysfs */
51620 + file_name[strlen(file_name) - 1] = '\0';
51621 +
51622 + status = be_load_fw(adapter, file_name);
51623 + if (!status)
51624 + return len;
51625 + else
51626 + return status;
51627 +}
51628 +
51629 +static DEVICE_ATTR(flash_fw, S_IWUSR, NULL, flash_fw_store);
51630 +
51631 +static struct attribute *benet_attrs[] = {
51632 + &dev_attr_flash_fw.attr,
51633 + NULL,
51634 +};
51635 +#endif
51636 +
51637 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
51638 +#define CLASS_DEV class_dev
51639 +#else
51640 +#define CLASS_DEV dev
51641 +#endif
51642 +
51643 +static struct attribute_group benet_attr_group = {.attrs = benet_attrs };
51644 +
51645 +void be_sysfs_create_group(struct be_adapter *adapter)
51646 +{
51647 + int status;
51648 +
51649 + status = sysfs_create_group(&adapter->netdev->CLASS_DEV.kobj,
51650 + &benet_attr_group);
51651 + if (status)
51652 + dev_err(&adapter->pdev->dev, "Could not create sysfs group\n");
51653 +}
51654 +
51655 +void be_sysfs_remove_group(struct be_adapter *adapter)
51656 +{
51657 + sysfs_remove_group(&adapter->netdev->CLASS_DEV.kobj, &benet_attr_group);
51658 +}
51659 diff --git a/drivers/net/benet/be_proc.c b/drivers/net/benet/be_proc.c
51660 new file mode 100644
51661 index 0000000..0bfdb3b
51662 --- /dev/null
51663 +++ b/drivers/net/benet/be_proc.c
51664 @@ -0,0 +1,513 @@
51665 +/*
51666 + * Copyright (C) 2005 - 2011 ServerEngines
51667 + * All rights reserved.
51668 + *
51669 + * This program is free software; you can redistribute it and/or
51670 + * modify it under the terms of the GNU General Public License version 2
51671 + * as published by the Free Software Foundation. The full GNU General
51672 + * Public License is included in this distribution in the file called COPYING.
51673 + *
51674 + * Contact Information:
51675 + * linux-drivers@serverengines.com
51676 + *
51677 + * ServerEngines
51678 + * 209 N. Fair Oaks Ave
51679 + * Sunnyvale, CA 94085
51680 + */
51681 +#include <linux/proc_fs.h>
51682 +#include "be.h"
51683 +
51684 +char *be_adpt_name[] = {
51685 + "driver/be2net0",
51686 + "driver/be2net1",
51687 + "driver/be2net2",
51688 + "driver/be2net3",
51689 + "driver/be2net4",
51690 + "driver/be2net5",
51691 + "driver/be2net6",
51692 + "driver/be2net7"
51693 +};
51694 +
51695 +#define MAX_BE_DEVICES 8
51696 +struct proc_dir_entry *be_proc_dir[MAX_BE_DEVICES];
51697 +
51698 +/*File to read Eth Ring Information */
51699 +#define BE_ETH_RING_FILE "eth_ring"
51700 +#define BE_DRVR_STAT_FILE "drvr_stat"
51701 +
51702 +/*
51703 + * this file enables user to read a 32 bit CSR register.
51704 + * to read 32 bit value of a register at offset 0x1234,
51705 + * first write the offset 0x1234 (echo "0x1234") in
51706 + * the file and then read the value from this file.
51707 + * the written offset is latched until another value is written
51708 + */
51709 +#define BE_CSR_R_FILE "csrr"
51710 +/*
51711 + * this file enables user to write to a 32 bit CSR register.
51712 + * to write a value 0xdeadbeef to a register at offset 0x1234,
51713 + * write 0x1234 0xdeadbeef (echo "0x1234 0xdeadbeeb") to
51714 + * the file.
51715 + */
51716 +#define BE_CSR_W_FILE "csrw"
51717 +
51718 +#define BE_PROC_MODE 0600
51719 +
51720 +static char read_eth_ring_buf[4096];
51721 +static int read_eth_ring_count;
51722 +
51723 +/*
51724 + * Get Various Eth Ring Properties
51725 + */
51726 +static int proc_eth_read_ring(char *page, char **start,
51727 + off_t off, int count, int *eof, void *data)
51728 +{
51729 + int i, n;
51730 + char *p = read_eth_ring_buf;
51731 + struct be_adapter *adapter = (struct be_adapter *) data;
51732 +
51733 + if (off == 0) {
51734 + /* Reset read_eth_ring_count */
51735 + read_eth_ring_count = 0;
51736 +
51737 + n = sprintf(p, " PhyAddr VirtAddr Size TotalEntries ProducerIndex ConsumerIndex NumUsed\n");
51738 + p += n;
51739 + read_eth_ring_count += n;
51740 +
51741 + n = sprintf(p, " ------- -------- ---- ------------ ------------- ------------- -------\n");
51742 + p += n;
51743 + read_eth_ring_count += n;
51744 +
51745 + n = sprintf(p, "%s", "EthSendRing");
51746 + p += n;
51747 + read_eth_ring_count += n;
51748 +
51749 + n = sprintf(p, " %7lx %8p %4u %12u %13u %13u %7u \n",
51750 + (long) adapter->tx_obj.q.dma_mem.dma,
51751 + (void *)adapter->tx_obj.q.dma_mem.va,
51752 + (u32) (adapter->tx_obj.q.len *
51753 + sizeof(struct be_eth_wrb)),
51754 + adapter->tx_obj.q.len, adapter->tx_obj.q.head,
51755 + adapter->tx_obj.q.tail,
51756 + atomic_read(&adapter->tx_obj.q.used));
51757 +
51758 + p += n;
51759 + read_eth_ring_count += n;
51760 +
51761 + /* Get Eth Send Compl Queue Details */
51762 + n = sprintf(p, "%s", "EthSendCmplRing");
51763 + p += n;
51764 + read_eth_ring_count += n;
51765 +
51766 + n = sprintf(p, " %7lx %8p %4u %12u %13s %13u %7s\n",
51767 + (long)adapter->tx_obj.cq.dma_mem.dma,
51768 + (void *)adapter->tx_obj.cq.dma_mem.va,
51769 + (u32) (adapter->tx_obj.cq.len *
51770 + sizeof(struct be_eth_tx_compl)),
51771 + adapter->tx_obj.cq.len, "NA",
51772 + adapter->tx_obj.cq.tail, "NA");
51773 +
51774 + p += n;
51775 + read_eth_ring_count += n;
51776 + /* Get Eth Rx Queue Details */
51777 + n = sprintf(p, "%s", "EthRxRing");
51778 + p += n;
51779 + read_eth_ring_count += n;
51780 +
51781 + n = sprintf(p, " %7lx %8p %4u %12u %13u %13s %7u \n",
51782 + (long)adapter->rx_obj.q.dma_mem.dma,
51783 + (void *)adapter->rx_obj.q.dma_mem.va,
51784 + (u32) (adapter->rx_obj.q.len *
51785 + sizeof(struct be_eth_rx_d)),
51786 + adapter->rx_obj.q.len, adapter->rx_obj.q.head,"NA",
51787 + atomic_read(&adapter->rx_obj.q.used));
51788 + p += n;
51789 + read_eth_ring_count += n;
51790 +
51791 + /* Get Eth Unicast Rx Compl Queue Details */
51792 + n = sprintf(p, "%s", "EthRxCmplRing");
51793 + p += n;
51794 + read_eth_ring_count += n;
51795 +
51796 + n = sprintf(p, " %7lx %8p %4u %12u %13s %13u %7s\n",
51797 + (long)adapter->rx_obj.cq.dma_mem.dma,
51798 + (void *)adapter->rx_obj.cq.dma_mem.va,
51799 + (u32) (adapter->rx_obj.cq.len *
51800 + sizeof(struct be_eth_rx_compl)),
51801 + adapter->rx_obj.cq.len, "NA",
51802 + adapter->rx_obj.cq.tail, "NA");
51803 + p += n;
51804 + read_eth_ring_count += n;
51805 +
51806 + /* Get Eth Event Queue Details */
51807 + n = sprintf(p, "%s", "EthTxEventRing");
51808 + p += n;
51809 + read_eth_ring_count += n;
51810 +
51811 + n = sprintf(p,
51812 + " %7lx %8p %4u %12u %13s %13u %7s\n",
51813 + (long) adapter->tx_eq.q.dma_mem.dma,
51814 + (void *)adapter->tx_eq.q.dma_mem.va,
51815 + (u32) (adapter->tx_eq.q.len *
51816 + sizeof(struct be_eq_entry)),
51817 + adapter->tx_eq.q.len, "NA",
51818 + adapter->tx_eq.q.tail, "NA");
51819 +
51820 + p += n;
51821 + read_eth_ring_count += n;
51822 +
51823 + /* Get Eth Event Queue Details */
51824 + n = sprintf(p, "%s", "EthRxEventRing");
51825 + p += n;
51826 + read_eth_ring_count += n;
51827 +
51828 + n = sprintf(p,
51829 + " %7lx %8p %4u %12u %13s %13u %7s\n",
51830 + (long) adapter->rx_eq.q.dma_mem.dma,
51831 + (void *)adapter->rx_eq.q.dma_mem.va,
51832 + (u32) (adapter->rx_eq.q.len *
51833 + sizeof(struct be_eq_entry)),
51834 + adapter->rx_eq.q.len, "NA",
51835 + adapter->rx_eq.q.tail, "NA");
51836 +
51837 + p += n;
51838 + read_eth_ring_count += n;
51839 + }
51840 +
51841 + *start = page;
51842 + /* copy whatever we can */
51843 + if (count < (read_eth_ring_count - off)) {
51844 + i = count;
51845 + *eof = 0; /* More bytes left */
51846 + } else {
51847 + i = read_eth_ring_count - off;
51848 + *eof = 1; /* Nothing left. indicate EOF */
51849 + }
51850 +
51851 + memcpy(page, read_eth_ring_buf + off, i);
51852 + return (i);
51853 +}
51854 +
51855 +static int proc_eth_write_ring(struct file *file,
51856 + const char *buffer, unsigned long count,
51857 + void *data)
51858 +{
51859 + return (count); /* we do not support write */
51860 +}
51861 +
51862 +/*
51863 + * read the driver stats.
51864 + */
51865 +static int proc_read_drvr_stat(char *page, char **start,
51866 + off_t off, int count, int *eof, void *data)
51867 +{
51868 + int n, lro_cp;
51869 + char *p = page;
51870 + struct be_adapter *adapter = (struct be_adapter *) data;
51871 + struct net_device *netdev = adapter->netdev;
51872 +
51873 + if (off == 0) {
51874 + n = sprintf(p, "interface = %s\n", netdev->name);
51875 + p += n;
51876 + n = sprintf(p, "tx_reqs = %d\n",
51877 + drvr_stats(adapter)->be_tx_reqs);
51878 + p += n;
51879 + n = sprintf(p, "tx_stops = %d\n",
51880 + drvr_stats(adapter)->be_tx_stops);
51881 + p += n;
51882 + n = sprintf(p, "fwd_reqs = %d\n",
51883 + drvr_stats(adapter)->be_fwd_reqs);
51884 + p += n;
51885 + n = sprintf(p, "tx_wrbs = %d\n",
51886 + drvr_stats(adapter)->be_tx_wrbs);
51887 + p += n;
51888 + n = sprintf(p, "rx_poll = %d\n", drvr_stats(adapter)->be_rx_polls);
51889 + p += n;
51890 + n = sprintf(p, "tx_events = %d\n",
51891 + drvr_stats(adapter)->be_tx_events);
51892 + p += n;
51893 + n = sprintf(p, "rx_events = %d\n",
51894 + drvr_stats(adapter)->be_rx_events);
51895 + p += n;
51896 + n = sprintf(p, "tx_compl = %d\n",
51897 + drvr_stats(adapter)->be_tx_compl);
51898 + p += n;
51899 + n = sprintf(p, "rx_compl = %d\n",
51900 + drvr_stats(adapter)->be_rx_compl);
51901 + p += n;
51902 + n = sprintf(p, "ethrx_post_fail = %d\n",
51903 + drvr_stats(adapter)->be_ethrx_post_fail);
51904 + p += n;
51905 + n = sprintf(p, "802.3_dropped_frames = %d\n",
51906 + drvr_stats(adapter)->be_802_3_dropped_frames);
51907 + p += n;
51908 + n = sprintf(p, "802.3_malformed_frames = %d\n",
51909 + drvr_stats(adapter)->be_802_3_malformed_frames);
51910 + p += n;
51911 + n = sprintf(p, "eth_tx_rate = %d\n",
51912 + drvr_stats(adapter)->be_tx_rate);
51913 + p += n;
51914 + n = sprintf(p, "eth_rx_rate = %d\n",
51915 + drvr_stats(adapter)->be_rx_rate);
51916 + p += n;
51917 +
51918 + lro_cp = (drvr_stats(adapter)->be_lro_hgram_data[0] +
51919 + drvr_stats(adapter)->be_lro_hgram_data[1] +
51920 + drvr_stats(adapter)->be_lro_hgram_data[2] +
51921 + drvr_stats(adapter)->be_lro_hgram_data[3] +
51922 + drvr_stats(adapter)->be_lro_hgram_data[4] +
51923 + drvr_stats(adapter)->be_lro_hgram_data[5] +
51924 + drvr_stats(adapter)->be_lro_hgram_data[6] +
51925 + drvr_stats(adapter)->be_lro_hgram_data[7])/100;
51926 + lro_cp = (lro_cp == 0) ? 1 : lro_cp; /* avoid divide by 0 */
51927 + n = sprintf(p,
51928 + "LRO data count %% histogram (1, 2-3, 4-5,..,>=16) = "
51929 + "%d, %d, %d, %d - %d, %d, %d, %d\n",
51930 + drvr_stats(adapter)->be_lro_hgram_data[0]/lro_cp,
51931 + drvr_stats(adapter)->be_lro_hgram_data[1]/lro_cp,
51932 + drvr_stats(adapter)->be_lro_hgram_data[2]/lro_cp,
51933 + drvr_stats(adapter)->be_lro_hgram_data[3]/lro_cp,
51934 + drvr_stats(adapter)->be_lro_hgram_data[4]/lro_cp,
51935 + drvr_stats(adapter)->be_lro_hgram_data[5]/lro_cp,
51936 + drvr_stats(adapter)->be_lro_hgram_data[6]/lro_cp,
51937 + drvr_stats(adapter)->be_lro_hgram_data[7]/lro_cp);
51938 + p += n;
51939 +
51940 + lro_cp = (drvr_stats(adapter)->be_lro_hgram_ack[0] +
51941 + drvr_stats(adapter)->be_lro_hgram_ack[1] +
51942 + drvr_stats(adapter)->be_lro_hgram_ack[2] +
51943 + drvr_stats(adapter)->be_lro_hgram_ack[3] +
51944 + drvr_stats(adapter)->be_lro_hgram_ack[4] +
51945 + drvr_stats(adapter)->be_lro_hgram_ack[5] +
51946 + drvr_stats(adapter)->be_lro_hgram_ack[6] +
51947 + drvr_stats(adapter)->be_lro_hgram_ack[7])/100;
51948 + lro_cp = (lro_cp == 0) ? 1 : lro_cp; /* avoid divide by 0 */
51949 + n = sprintf(p,
51950 + "LRO ack count %% histogram (1, 2-3, 4-5,..,>=16) = "
51951 + "%d, %d, %d, %d - %d, %d, %d, %d\n",
51952 + drvr_stats(adapter)->be_lro_hgram_ack[0]/lro_cp,
51953 + drvr_stats(adapter)->be_lro_hgram_ack[1]/lro_cp,
51954 + drvr_stats(adapter)->be_lro_hgram_ack[2]/lro_cp,
51955 + drvr_stats(adapter)->be_lro_hgram_ack[3]/lro_cp,
51956 + drvr_stats(adapter)->be_lro_hgram_ack[4]/lro_cp,
51957 + drvr_stats(adapter)->be_lro_hgram_ack[5]/lro_cp,
51958 + drvr_stats(adapter)->be_lro_hgram_ack[6]/lro_cp,
51959 + drvr_stats(adapter)->be_lro_hgram_ack[7]/lro_cp);
51960 + p += n;
51961 + n = sprintf(p, "rx_eq_delay = %d \n", adapter->rx_eq.cur_eqd);
51962 + p += n;
51963 + n = sprintf(p, "rx frags per sec=%d \n",
51964 + drvr_stats(adapter)->be_rx_fps);
51965 + p += n;
51966 +
51967 + }
51968 + *eof = 1;
51969 + return (p - page);
51970 +}
51971 +
51972 +static int proc_write_drvr_stat(struct file *file,
51973 + const char *buffer, unsigned long count,
51974 + void *data)
51975 +{
51976 + struct be_adapter *adapter = (struct be_adapter *) data;
51977 +
51978 + memset(&(adapter->stats.drvr_stats), 0,
51979 + sizeof(adapter->stats.drvr_stats));
51983 51980 + return (count); /* any write clears the driver stats */
51981 +}
51982 +
51983 +#if 0
51984 +/* the following are some of the functions that are needed here
51985 + * until all initializations are done by MPU.
51986 + */
51987 +
51988 +u32
51989 +CsrReadDr(void* BaseAddress, u32 Offset)
51990 +{
51991 + u32 *rp;
51992 +
51993 + rp = (u32 *) (((u8 *) BaseAddress) + Offset);
51994 + return (*rp);
51995 +}
51996 +
51997 +/*!
51998 +
51999 +@brief
52000 + This routine writes to a register located within the CSR
52001 + space for a given function object.
52002 +
52003 +@param
52007 52004 + BaseAddress - Pointer to the base of the function's CSR space to write to.
52005 +
52006 +@param
52007 + Offset - The Offset (in bytes) to write to within the function's CSR space.
52008 +
52009 +@param
52010 + Value - The value to write to the register.
52011 +
52012 +@return
52013 +
52014 +@note
52015 + IRQL: any
52016 +
52017 +*/
52018 +void
52019 +CsrWriteDr(void* BaseAddress, u32 Offset, u32 Value)
52020 +{
52021 + u32 *Register;
52022 +
52023 + Register = (u32 *) (((u8 *) BaseAddress) + Offset);
52024 +
52025 + //TRACE(DL_INFO, "CsrWrite[ %X ] <= %X", Register, Value);
52026 + *Register = Value;
52027 +}
52028 +u32 be_proc_csrr_offset = -1; /* to latch the offset of next CSR Read req. */
52029 +
52030 +/*
52031 + * read the csr_r file. return the 32 bit register value from
52032 + * CSR space at offset latched in the global location
52033 + * be_proc_csrr_offset
52034 + */
52035 +static int proc_read_csr_r(char *page, char **start,
52036 + off_t off, int count, int *eof, void *data)
52037 +{
52038 + struct be_adapter * adapter = (struct be_adapter *)data;
52039 + u32 val;
52040 + int n = 0;
52041 + if (be_proc_csrr_offset == -1)
52042 + return -EINVAL;
52043 +
52044 + if (off == 0) {
52045 + /* read the CSR at offset be_proc_csrr_offset and return */
52046 + val = CsrReadDr(adapter->csr_va, be_proc_csrr_offset);
52047 + n = sprintf(page, "0x%x\n", val);
52048 + }
52049 + *eof = 1;
52050 + return n;
52051 +}
52052 +
52053 +/*
52054 + * save the written value in be_proc_csrr_offset for next
52055 + * read from the file
52056 + */
52057 +static int proc_write_csr_r(struct file *file,
52058 + const char *buffer, unsigned long count, void *data)
52059 +{
52060 + char buf[64];
52061 + u32 n;
52062 +
52063 + if (count > sizeof(buf) + 1)
52064 + return -EINVAL;
52065 + if (copy_from_user(buf, buffer, count))
52066 + return -EFAULT;
52067 + buf[count] = '\0';
52068 +
52069 + n = simple_strtoul(buf, NULL, 16);
52070 + if (n < 0x50000)
52071 + be_proc_csrr_offset = n;
52072 + return (count);
52073 +}
52074 +
52075 +/*
52076 + * return the latched offset for reading the csr_r file.
52077 + */
52078 +static int proc_read_csr_w(char *page, char **start,
52079 + off_t off, int count, int *eof, void *data)
52080 +{
52081 +
52082 + *eof = 1;
52083 + return sprintf(page, "0x%x\n", be_proc_csrr_offset);
52084 +}
52085 +
52086 +/*
52087 + * the incoming string is of the form "<offset> <value>"
52088 + * where the offset is the offset of the register to be written
52089 + * and value is the value to be written.
52090 + */
52091 +static int proc_write_csr_w(struct file *file,
52092 + const char *buffer, unsigned long count,
52093 + void *data)
52094 +{
52095 + char buf[64];
52096 + char *p;
52097 + u32 n, val;
52098 + struct be_adapter * adapter = (struct be_adapter *)data;
52099 +
52100 + if (count > sizeof(buf) + 1)
52101 + return -EINVAL;
52102 + if (copy_from_user(buf, buffer, count))
52103 + return -EFAULT;
52104 + buf[count] = '\0';
52105 +
52106 + n = simple_strtoul(buf, &p, 16);
52107 + if (n > 0x50000)
52108 + return -EINVAL;
52109 +
52110 + /* now get the actual value to be written */
52111 + while (*p == ' ' || *p == '\t')
52112 + p++;
52113 + val = simple_strtoul(p, NULL, 16);
52114 + CsrWriteDr(adapter->csr_va, n, val);
52115 + return (count);
52116 +}
52117 +#endif
52118 +
52119 +void be_init_procfs(struct be_adapter *adapter, int adapt_num)
52120 +{
52121 + static struct proc_dir_entry *pde;
52122 +
52123 + if (adapt_num > MAX_BE_DEVICES - 1)
52124 + return;
52125 +
52126 + /* create directory */
52127 + be_proc_dir[adapt_num] =
52128 + proc_mkdir(be_adpt_name[adapt_num], NULL);
52129 + if (be_proc_dir[adapt_num]) {
52130 + (be_proc_dir[adapt_num])->owner = THIS_MODULE;
52131 + }
52132 +
52133 + pde = create_proc_entry(BE_ETH_RING_FILE, BE_PROC_MODE,
52134 + be_proc_dir[adapt_num]);
52135 + if (pde) {
52136 + pde->read_proc = proc_eth_read_ring;
52137 + pde->write_proc = proc_eth_write_ring;
52138 + pde->data = adapter;
52139 + pde->owner = THIS_MODULE;
52140 + }
52141 +
52142 + pde = create_proc_entry(BE_DRVR_STAT_FILE, BE_PROC_MODE,
52143 + be_proc_dir[adapt_num]);
52144 + if (pde) {
52145 + pde->read_proc = proc_read_drvr_stat;
52146 + pde->write_proc = proc_write_drvr_stat;
52147 + pde->data = adapter;
52148 + pde->owner = THIS_MODULE;
52149 + }
52150 +
52151 +#if 0
52152 + if ((pde = create_proc_entry(BE_CSR_R_FILE, BE_PROC_MODE, be_proc_dir[adapt_num]))) {
52153 + pde->read_proc = proc_read_csr_r;
52154 + pde->write_proc = proc_write_csr_r;
52155 + pde->data = adapter;
52156 + pde->owner = THIS_MODULE;
52157 + }
52158 +
52159 + if ((pde = create_proc_entry(BE_CSR_W_FILE, BE_PROC_MODE, be_proc_dir[adapt_num]))) {
52160 + pde->read_proc = proc_read_csr_w;
52161 + pde->write_proc = proc_write_csr_w;
52162 + pde->data = adapter;
52163 + pde->owner = THIS_MODULE;
52164 + }
52165 +#endif
52166 +}
52167 +
52168 +void be_cleanup_procfs(struct be_adapter *adapter, int adapt_num)
52169 +{
52170 + if (adapt_num > MAX_BE_DEVICES - 1)
52171 + return;
52172 + remove_proc_entry(BE_ETH_RING_FILE, be_proc_dir[adapt_num]);
52173 + remove_proc_entry(BE_DRVR_STAT_FILE, be_proc_dir[adapt_num]);
52174 + remove_proc_entry(BE_CSR_R_FILE, be_proc_dir[adapt_num]);
52175 + remove_proc_entry(BE_CSR_W_FILE, be_proc_dir[adapt_num]);
52176 + remove_proc_entry(be_adpt_name[adapt_num], NULL);
52177 +}
52178 diff --git a/drivers/net/benet/version.h b/drivers/net/benet/version.h
52179 new file mode 100644
52180 index 0000000..c7ed692
52181 --- /dev/null
52182 +++ b/drivers/net/benet/version.h
52183 @@ -0,0 +1,51 @@
52184 +#define STR_BE_BRANCH "0"
52185 +#define STR_BE_BUILD "479"
52186 +#define STR_BE_DOT "0"
52187 +#define STR_BE_MINOR "0"
52188 +#define STR_BE_MAJOR "4"
52189 +
52190 +#define BE_BRANCH 0
52191 +#define BE_BUILD 479
52192 +#define BE_DOT 0
52193 +#define BE_MINOR 0
52194 +#define BE_MAJOR 4
52195 +
52196 +#define MGMT_BRANCH 0
52197 +#define MGMT_BUILDNUM 479
52198 +#define MGMT_MINOR 0
52199 +#define MGMT_MAJOR 4
52200 +
52201 +#define BE_REDBOOT_VERSION "2.0.5.0"
52202 +
52203 +//start-auto
52204 +#define BUILD_MONTH "12"
52205 +#define BUILD_MONTH_NAME "December"
52206 +#define BUILD_DAY "6"
52207 +#define BUILD_YEAR "2011"
52208 +#define BUILD_24HOUR "21"
52209 +#define BUILD_12HOUR "9"
52210 +#define BUILD_AM_PM "PM"
52211 +#define BUILD_MIN "48"
52212 +#define BUILD_SEC "05"
52213 +#define BUILD_MONTH_NUMBER 12
52214 +#define BUILD_DAY_NUMBER 6
52215 +#define BUILD_YEAR_NUMBER 2011
52216 +#define BUILD_24HOUR_NUMBER 21
52217 +#define BUILD_12HOUR_NUMBER 9
52218 +#define BUILD_MIN_NUMBER 48
52219 +#define BUILD_SEC_NUMBER 5
52220 +#undef MAJOR_BUILD
52221 +#undef MINOR_BUILD
52222 +#undef DOT_BUILD
52223 +#define NUMBERED_BUILD
52224 +#undef BRANCH_BUILD
52225 +//end-auto
52226 +
52227 +#define ELX_FCOE_XROM_BIOS_VER "7.03a1"
52228 +#define ELX_FCoE_X86_VER "4.02a1"
52229 +#define ELX_FCoE_EFI_VER "5.01a1"
52230 +#define ELX_FCoE_FCODE_VER "4.01a0"
52231 +#define ELX_PXE_BIOS_VER "3.00a5"
52232 +#define ELX_UEFI_NIC_VER "2.10A10"
52233 +#define ELX_UEFI_FCODE_VER "1.10A0"
52234 +#define ELX_ISCSI_BIOS_VER "1.00A8"
52235 diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
52236 index 4874b2b..67f8526 100644
52237 --- a/drivers/net/bnx2.c
52238 +++ b/drivers/net/bnx2.c
52239 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
52240 int rc = 0;
52241 u32 magic, csum;
52242
52243 + pax_track_stack();
52244 +
52245 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
52246 goto test_nvram_done;
52247
52248 diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
52249 index fd3eb07..8a6978d 100644
52250 --- a/drivers/net/cxgb3/l2t.h
52251 +++ b/drivers/net/cxgb3/l2t.h
52252 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
52253 */
52254 struct l2t_skb_cb {
52255 arp_failure_handler_func arp_failure_handler;
52256 -};
52257 +} __no_const;
52258
52259 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
52260
52261 diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
52262 index 032cfe0..411af379 100644
52263 --- a/drivers/net/cxgb3/t3_hw.c
52264 +++ b/drivers/net/cxgb3/t3_hw.c
52265 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
52266 int i, addr, ret;
52267 struct t3_vpd vpd;
52268
52269 + pax_track_stack();
52270 +
52271 /*
52272 * Card information is normally at VPD_BASE but some early cards had
52273 * it at 0.
52274 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
52275 index d1e0563..b9e129c 100644
52276 --- a/drivers/net/e1000e/82571.c
52277 +++ b/drivers/net/e1000e/82571.c
52278 @@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
52279 {
52280 struct e1000_hw *hw = &adapter->hw;
52281 struct e1000_mac_info *mac = &hw->mac;
52282 - struct e1000_mac_operations *func = &mac->ops;
52283 + e1000_mac_operations_no_const *func = &mac->ops;
52284 u32 swsm = 0;
52285 u32 swsm2 = 0;
52286 bool force_clear_smbi = false;
52287 @@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
52288 temp = er32(ICRXDMTC);
52289 }
52290
52291 -static struct e1000_mac_operations e82571_mac_ops = {
52292 +static const struct e1000_mac_operations e82571_mac_ops = {
52293 /* .check_mng_mode: mac type dependent */
52294 /* .check_for_link: media type dependent */
52295 .id_led_init = e1000e_id_led_init,
52296 @@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
52297 .setup_led = e1000e_setup_led_generic,
52298 };
52299
52300 -static struct e1000_phy_operations e82_phy_ops_igp = {
52301 +static const struct e1000_phy_operations e82_phy_ops_igp = {
52302 .acquire_phy = e1000_get_hw_semaphore_82571,
52303 .check_reset_block = e1000e_check_reset_block_generic,
52304 .commit_phy = NULL,
52305 @@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
52306 .cfg_on_link_up = NULL,
52307 };
52308
52309 -static struct e1000_phy_operations e82_phy_ops_m88 = {
52310 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
52311 .acquire_phy = e1000_get_hw_semaphore_82571,
52312 .check_reset_block = e1000e_check_reset_block_generic,
52313 .commit_phy = e1000e_phy_sw_reset,
52314 @@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
52315 .cfg_on_link_up = NULL,
52316 };
52317
52318 -static struct e1000_phy_operations e82_phy_ops_bm = {
52319 +static const struct e1000_phy_operations e82_phy_ops_bm = {
52320 .acquire_phy = e1000_get_hw_semaphore_82571,
52321 .check_reset_block = e1000e_check_reset_block_generic,
52322 .commit_phy = e1000e_phy_sw_reset,
52323 @@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
52324 .cfg_on_link_up = NULL,
52325 };
52326
52327 -static struct e1000_nvm_operations e82571_nvm_ops = {
52328 +static const struct e1000_nvm_operations e82571_nvm_ops = {
52329 .acquire_nvm = e1000_acquire_nvm_82571,
52330 .read_nvm = e1000e_read_nvm_eerd,
52331 .release_nvm = e1000_release_nvm_82571,
52332 diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
52333 index 47db9bd..fa58ccd 100644
52334 --- a/drivers/net/e1000e/e1000.h
52335 +++ b/drivers/net/e1000e/e1000.h
52336 @@ -375,9 +375,9 @@ struct e1000_info {
52337 u32 pba;
52338 u32 max_hw_frame_size;
52339 s32 (*get_variants)(struct e1000_adapter *);
52340 - struct e1000_mac_operations *mac_ops;
52341 - struct e1000_phy_operations *phy_ops;
52342 - struct e1000_nvm_operations *nvm_ops;
52343 + const struct e1000_mac_operations *mac_ops;
52344 + const struct e1000_phy_operations *phy_ops;
52345 + const struct e1000_nvm_operations *nvm_ops;
52346 };
52347
52348 /* hardware capability, feature, and workaround flags */
52349 diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
52350 index ae5d736..e9a93a1 100644
52351 --- a/drivers/net/e1000e/es2lan.c
52352 +++ b/drivers/net/e1000e/es2lan.c
52353 @@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
52354 {
52355 struct e1000_hw *hw = &adapter->hw;
52356 struct e1000_mac_info *mac = &hw->mac;
52357 - struct e1000_mac_operations *func = &mac->ops;
52358 + e1000_mac_operations_no_const *func = &mac->ops;
52359
52360 /* Set media type */
52361 switch (adapter->pdev->device) {
52362 @@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
52363 temp = er32(ICRXDMTC);
52364 }
52365
52366 -static struct e1000_mac_operations es2_mac_ops = {
52367 +static const struct e1000_mac_operations es2_mac_ops = {
52368 .id_led_init = e1000e_id_led_init,
52369 .check_mng_mode = e1000e_check_mng_mode_generic,
52370 /* check_for_link dependent on media type */
52371 @@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
52372 .setup_led = e1000e_setup_led_generic,
52373 };
52374
52375 -static struct e1000_phy_operations es2_phy_ops = {
52376 +static const struct e1000_phy_operations es2_phy_ops = {
52377 .acquire_phy = e1000_acquire_phy_80003es2lan,
52378 .check_reset_block = e1000e_check_reset_block_generic,
52379 .commit_phy = e1000e_phy_sw_reset,
52380 @@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
52381 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
52382 };
52383
52384 -static struct e1000_nvm_operations es2_nvm_ops = {
52385 +static const struct e1000_nvm_operations es2_nvm_ops = {
52386 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
52387 .read_nvm = e1000e_read_nvm_eerd,
52388 .release_nvm = e1000_release_nvm_80003es2lan,
52389 diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
52390 index 11f3b7c..6381887 100644
52391 --- a/drivers/net/e1000e/hw.h
52392 +++ b/drivers/net/e1000e/hw.h
52393 @@ -753,6 +753,7 @@ struct e1000_mac_operations {
52394 s32 (*setup_physical_interface)(struct e1000_hw *);
52395 s32 (*setup_led)(struct e1000_hw *);
52396 };
52397 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
52398
52399 /* Function pointers for the PHY. */
52400 struct e1000_phy_operations {
52401 @@ -774,6 +775,7 @@ struct e1000_phy_operations {
52402 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
52403 s32 (*cfg_on_link_up)(struct e1000_hw *);
52404 };
52405 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
52406
52407 /* Function pointers for the NVM. */
52408 struct e1000_nvm_operations {
52409 @@ -785,9 +787,10 @@ struct e1000_nvm_operations {
52410 s32 (*validate_nvm)(struct e1000_hw *);
52411 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
52412 };
52413 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
52414
52415 struct e1000_mac_info {
52416 - struct e1000_mac_operations ops;
52417 + e1000_mac_operations_no_const ops;
52418
52419 u8 addr[6];
52420 u8 perm_addr[6];
52421 @@ -823,7 +826,7 @@ struct e1000_mac_info {
52422 };
52423
52424 struct e1000_phy_info {
52425 - struct e1000_phy_operations ops;
52426 + e1000_phy_operations_no_const ops;
52427
52428 enum e1000_phy_type type;
52429
52430 @@ -857,7 +860,7 @@ struct e1000_phy_info {
52431 };
52432
52433 struct e1000_nvm_info {
52434 - struct e1000_nvm_operations ops;
52435 + e1000_nvm_operations_no_const ops;
52436
52437 enum e1000_nvm_type type;
52438 enum e1000_nvm_override override;
52439 diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
52440 index de39f9a..e28d3e0 100644
52441 --- a/drivers/net/e1000e/ich8lan.c
52442 +++ b/drivers/net/e1000e/ich8lan.c
52443 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
52444 }
52445 }
52446
52447 -static struct e1000_mac_operations ich8_mac_ops = {
52448 +static const struct e1000_mac_operations ich8_mac_ops = {
52449 .id_led_init = e1000e_id_led_init,
52450 .check_mng_mode = e1000_check_mng_mode_ich8lan,
52451 .check_for_link = e1000_check_for_copper_link_ich8lan,
52452 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
52453 /* id_led_init dependent on mac type */
52454 };
52455
52456 -static struct e1000_phy_operations ich8_phy_ops = {
52457 +static const struct e1000_phy_operations ich8_phy_ops = {
52458 .acquire_phy = e1000_acquire_swflag_ich8lan,
52459 .check_reset_block = e1000_check_reset_block_ich8lan,
52460 .commit_phy = NULL,
52461 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
52462 .write_phy_reg = e1000e_write_phy_reg_igp,
52463 };
52464
52465 -static struct e1000_nvm_operations ich8_nvm_ops = {
52466 +static const struct e1000_nvm_operations ich8_nvm_ops = {
52467 .acquire_nvm = e1000_acquire_nvm_ich8lan,
52468 .read_nvm = e1000_read_nvm_ich8lan,
52469 .release_nvm = e1000_release_nvm_ich8lan,
52470 diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
52471 index 18d5fbb..542d96d 100644
52472 --- a/drivers/net/fealnx.c
52473 +++ b/drivers/net/fealnx.c
52474 @@ -151,7 +151,7 @@ struct chip_info {
52475 int flags;
52476 };
52477
52478 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
52479 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
52480 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
52481 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
52482 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
52483 diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
52484 index 0e5b54b..b503f82 100644
52485 --- a/drivers/net/hamradio/6pack.c
52486 +++ b/drivers/net/hamradio/6pack.c
52487 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
52488 unsigned char buf[512];
52489 int count1;
52490
52491 + pax_track_stack();
52492 +
52493 if (!count)
52494 return;
52495
52496 diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
52497 index 5862282..7cce8cb 100644
52498 --- a/drivers/net/ibmveth.c
52499 +++ b/drivers/net/ibmveth.c
52500 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
52501 NULL,
52502 };
52503
52504 -static struct sysfs_ops veth_pool_ops = {
52505 +static const struct sysfs_ops veth_pool_ops = {
52506 .show = veth_pool_show,
52507 .store = veth_pool_store,
52508 };
52509 diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
52510 index d617f2d..57b5309 100644
52511 --- a/drivers/net/igb/e1000_82575.c
52512 +++ b/drivers/net/igb/e1000_82575.c
52513 @@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
52514 wr32(E1000_VT_CTL, vt_ctl);
52515 }
52516
52517 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
52518 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
52519 .reset_hw = igb_reset_hw_82575,
52520 .init_hw = igb_init_hw_82575,
52521 .check_for_link = igb_check_for_link_82575,
52522 @@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
52523 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
52524 };
52525
52526 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
52527 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
52528 .acquire = igb_acquire_phy_82575,
52529 .get_cfg_done = igb_get_cfg_done_82575,
52530 .release = igb_release_phy_82575,
52531 };
52532
52533 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
52534 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
52535 .acquire = igb_acquire_nvm_82575,
52536 .read = igb_read_nvm_eerd,
52537 .release = igb_release_nvm_82575,
52538 diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
52539 index 72081df..d855cf5 100644
52540 --- a/drivers/net/igb/e1000_hw.h
52541 +++ b/drivers/net/igb/e1000_hw.h
52542 @@ -288,6 +288,7 @@ struct e1000_mac_operations {
52543 s32 (*read_mac_addr)(struct e1000_hw *);
52544 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
52545 };
52546 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
52547
52548 struct e1000_phy_operations {
52549 s32 (*acquire)(struct e1000_hw *);
52550 @@ -303,6 +304,7 @@ struct e1000_phy_operations {
52551 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
52552 s32 (*write_reg)(struct e1000_hw *, u32, u16);
52553 };
52554 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
52555
52556 struct e1000_nvm_operations {
52557 s32 (*acquire)(struct e1000_hw *);
52558 @@ -310,6 +312,7 @@ struct e1000_nvm_operations {
52559 void (*release)(struct e1000_hw *);
52560 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
52561 };
52562 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
52563
52564 struct e1000_info {
52565 s32 (*get_invariants)(struct e1000_hw *);
52566 @@ -321,7 +324,7 @@ struct e1000_info {
52567 extern const struct e1000_info e1000_82575_info;
52568
52569 struct e1000_mac_info {
52570 - struct e1000_mac_operations ops;
52571 + e1000_mac_operations_no_const ops;
52572
52573 u8 addr[6];
52574 u8 perm_addr[6];
52575 @@ -365,7 +368,7 @@ struct e1000_mac_info {
52576 };
52577
52578 struct e1000_phy_info {
52579 - struct e1000_phy_operations ops;
52580 + e1000_phy_operations_no_const ops;
52581
52582 enum e1000_phy_type type;
52583
52584 @@ -400,7 +403,7 @@ struct e1000_phy_info {
52585 };
52586
52587 struct e1000_nvm_info {
52588 - struct e1000_nvm_operations ops;
52589 + e1000_nvm_operations_no_const ops;
52590
52591 enum e1000_nvm_type type;
52592 enum e1000_nvm_override override;
52593 @@ -446,6 +449,7 @@ struct e1000_mbx_operations {
52594 s32 (*check_for_ack)(struct e1000_hw *, u16);
52595 s32 (*check_for_rst)(struct e1000_hw *, u16);
52596 };
52597 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
52598
52599 struct e1000_mbx_stats {
52600 u32 msgs_tx;
52601 @@ -457,7 +461,7 @@ struct e1000_mbx_stats {
52602 };
52603
52604 struct e1000_mbx_info {
52605 - struct e1000_mbx_operations ops;
52606 + e1000_mbx_operations_no_const ops;
52607 struct e1000_mbx_stats stats;
52608 u32 timeout;
52609 u32 usec_delay;
52610 diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
52611 index 1e8ce37..549c453 100644
52612 --- a/drivers/net/igbvf/vf.h
52613 +++ b/drivers/net/igbvf/vf.h
52614 @@ -187,9 +187,10 @@ struct e1000_mac_operations {
52615 s32 (*read_mac_addr)(struct e1000_hw *);
52616 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
52617 };
52618 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
52619
52620 struct e1000_mac_info {
52621 - struct e1000_mac_operations ops;
52622 + e1000_mac_operations_no_const ops;
52623 u8 addr[6];
52624 u8 perm_addr[6];
52625
52626 @@ -211,6 +212,7 @@ struct e1000_mbx_operations {
52627 s32 (*check_for_ack)(struct e1000_hw *);
52628 s32 (*check_for_rst)(struct e1000_hw *);
52629 };
52630 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
52631
52632 struct e1000_mbx_stats {
52633 u32 msgs_tx;
52634 @@ -222,7 +224,7 @@ struct e1000_mbx_stats {
52635 };
52636
52637 struct e1000_mbx_info {
52638 - struct e1000_mbx_operations ops;
52639 + e1000_mbx_operations_no_const ops;
52640 struct e1000_mbx_stats stats;
52641 u32 timeout;
52642 u32 usec_delay;
52643 diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
52644 index aa7286b..a61394f 100644
52645 --- a/drivers/net/iseries_veth.c
52646 +++ b/drivers/net/iseries_veth.c
52647 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
52648 NULL
52649 };
52650
52651 -static struct sysfs_ops veth_cnx_sysfs_ops = {
52652 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
52653 .show = veth_cnx_attribute_show
52654 };
52655
52656 @@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
52657 NULL
52658 };
52659
52660 -static struct sysfs_ops veth_port_sysfs_ops = {
52661 +static const struct sysfs_ops veth_port_sysfs_ops = {
52662 .show = veth_port_attribute_show
52663 };
52664
52665 diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
52666 index 8aa44dc..fa1e797 100644
52667 --- a/drivers/net/ixgb/ixgb_main.c
52668 +++ b/drivers/net/ixgb/ixgb_main.c
52669 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
52670 u32 rctl;
52671 int i;
52672
52673 + pax_track_stack();
52674 +
52675 /* Check for Promiscuous and All Multicast modes */
52676
52677 rctl = IXGB_READ_REG(hw, RCTL);
52678 diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
52679 index af35e1d..8781785 100644
52680 --- a/drivers/net/ixgb/ixgb_param.c
52681 +++ b/drivers/net/ixgb/ixgb_param.c
52682 @@ -260,6 +260,9 @@ void __devinit
52683 ixgb_check_options(struct ixgb_adapter *adapter)
52684 {
52685 int bd = adapter->bd_number;
52686 +
52687 + pax_track_stack();
52688 +
52689 if (bd >= IXGB_MAX_NIC) {
52690 printk(KERN_NOTICE
52691 "Warning: no configuration for board #%i\n", bd);
52692 diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
52693 index b17aa73..ed74540 100644
52694 --- a/drivers/net/ixgbe/ixgbe_type.h
52695 +++ b/drivers/net/ixgbe/ixgbe_type.h
52696 @@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
52697 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
52698 s32 (*update_checksum)(struct ixgbe_hw *);
52699 };
52700 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
52701
52702 struct ixgbe_mac_operations {
52703 s32 (*init_hw)(struct ixgbe_hw *);
52704 @@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
52705 /* Flow Control */
52706 s32 (*fc_enable)(struct ixgbe_hw *, s32);
52707 };
52708 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
52709
52710 struct ixgbe_phy_operations {
52711 s32 (*identify)(struct ixgbe_hw *);
52712 @@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
52713 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
52714 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
52715 };
52716 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
52717
52718 struct ixgbe_eeprom_info {
52719 - struct ixgbe_eeprom_operations ops;
52720 + ixgbe_eeprom_operations_no_const ops;
52721 enum ixgbe_eeprom_type type;
52722 u32 semaphore_delay;
52723 u16 word_size;
52724 @@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
52725 };
52726
52727 struct ixgbe_mac_info {
52728 - struct ixgbe_mac_operations ops;
52729 + ixgbe_mac_operations_no_const ops;
52730 enum ixgbe_mac_type type;
52731 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
52732 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
52733 @@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
52734 };
52735
52736 struct ixgbe_phy_info {
52737 - struct ixgbe_phy_operations ops;
52738 + ixgbe_phy_operations_no_const ops;
52739 struct mdio_if_info mdio;
52740 enum ixgbe_phy_type type;
52741 u32 id;
52742 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
52743 index 291a505..2543756 100644
52744 --- a/drivers/net/mlx4/main.c
52745 +++ b/drivers/net/mlx4/main.c
52746 @@ -38,6 +38,7 @@
52747 #include <linux/errno.h>
52748 #include <linux/pci.h>
52749 #include <linux/dma-mapping.h>
52750 +#include <linux/sched.h>
52751
52752 #include <linux/mlx4/device.h>
52753 #include <linux/mlx4/doorbell.h>
52754 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
52755 u64 icm_size;
52756 int err;
52757
52758 + pax_track_stack();
52759 +
52760 err = mlx4_QUERY_FW(dev);
52761 if (err) {
52762 if (err == -EACCES)
52763 diff --git a/drivers/net/niu.c b/drivers/net/niu.c
52764 index 2dce134..fa5ce75 100644
52765 --- a/drivers/net/niu.c
52766 +++ b/drivers/net/niu.c
52767 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
52768 int i, num_irqs, err;
52769 u8 first_ldg;
52770
52771 + pax_track_stack();
52772 +
52773 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
52774 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
52775 ldg_num_map[i] = first_ldg + i;
52776 diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
52777 index c1b3f09..97cd8c4 100644
52778 --- a/drivers/net/pcnet32.c
52779 +++ b/drivers/net/pcnet32.c
52780 @@ -79,7 +79,7 @@ static int cards_found;
52781 /*
52782 * VLB I/O addresses
52783 */
52784 -static unsigned int pcnet32_portlist[] __initdata =
52785 +static unsigned int pcnet32_portlist[] __devinitdata =
52786 { 0x300, 0x320, 0x340, 0x360, 0 };
52787
52788 static int pcnet32_debug = 0;
52789 @@ -267,7 +267,7 @@ struct pcnet32_private {
52790 struct sk_buff **rx_skbuff;
52791 dma_addr_t *tx_dma_addr;
52792 dma_addr_t *rx_dma_addr;
52793 - struct pcnet32_access a;
52794 + struct pcnet32_access *a;
52795 spinlock_t lock; /* Guard lock */
52796 unsigned int cur_rx, cur_tx; /* The next free ring entry */
52797 unsigned int rx_ring_size; /* current rx ring size */
52798 @@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
52799 u16 val;
52800
52801 netif_wake_queue(dev);
52802 - val = lp->a.read_csr(ioaddr, CSR3);
52803 + val = lp->a->read_csr(ioaddr, CSR3);
52804 val &= 0x00ff;
52805 - lp->a.write_csr(ioaddr, CSR3, val);
52806 + lp->a->write_csr(ioaddr, CSR3, val);
52807 napi_enable(&lp->napi);
52808 }
52809
52810 @@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
52811 r = mii_link_ok(&lp->mii_if);
52812 } else if (lp->chip_version >= PCNET32_79C970A) {
52813 ulong ioaddr = dev->base_addr; /* card base I/O address */
52814 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
52815 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
52816 } else { /* can not detect link on really old chips */
52817 r = 1;
52818 }
52819 @@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
52820 pcnet32_netif_stop(dev);
52821
52822 spin_lock_irqsave(&lp->lock, flags);
52823 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52824 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52825
52826 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
52827
52828 @@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
52829 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52830 {
52831 struct pcnet32_private *lp = netdev_priv(dev);
52832 - struct pcnet32_access *a = &lp->a; /* access to registers */
52833 + struct pcnet32_access *a = lp->a; /* access to registers */
52834 ulong ioaddr = dev->base_addr; /* card base I/O address */
52835 struct sk_buff *skb; /* sk buff */
52836 int x, i; /* counters */
52837 @@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52838 pcnet32_netif_stop(dev);
52839
52840 spin_lock_irqsave(&lp->lock, flags);
52841 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52842 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52843
52844 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
52845
52846 /* Reset the PCNET32 */
52847 - lp->a.reset(ioaddr);
52848 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52849 + lp->a->reset(ioaddr);
52850 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52851
52852 /* switch pcnet32 to 32bit mode */
52853 - lp->a.write_bcr(ioaddr, 20, 2);
52854 + lp->a->write_bcr(ioaddr, 20, 2);
52855
52856 /* purge & init rings but don't actually restart */
52857 pcnet32_restart(dev, 0x0000);
52858
52859 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52860 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52861
52862 /* Initialize Transmit buffers. */
52863 size = data_len + 15;
52864 @@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52865
52866 /* set int loopback in CSR15 */
52867 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
52868 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
52869 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
52870
52871 teststatus = cpu_to_le16(0x8000);
52872 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
52873 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
52874
52875 /* Check status of descriptors */
52876 for (x = 0; x < numbuffs; x++) {
52877 @@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52878 }
52879 }
52880
52881 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52882 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52883 wmb();
52884 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
52885 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
52886 @@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52887 pcnet32_restart(dev, CSR0_NORMAL);
52888 } else {
52889 pcnet32_purge_rx_ring(dev);
52890 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
52891 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
52892 }
52893 spin_unlock_irqrestore(&lp->lock, flags);
52894
52895 @@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52896 static void pcnet32_led_blink_callback(struct net_device *dev)
52897 {
52898 struct pcnet32_private *lp = netdev_priv(dev);
52899 - struct pcnet32_access *a = &lp->a;
52900 + struct pcnet32_access *a = lp->a;
52901 ulong ioaddr = dev->base_addr;
52902 unsigned long flags;
52903 int i;
52904 @@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
52905 static int pcnet32_phys_id(struct net_device *dev, u32 data)
52906 {
52907 struct pcnet32_private *lp = netdev_priv(dev);
52908 - struct pcnet32_access *a = &lp->a;
52909 + struct pcnet32_access *a = lp->a;
52910 ulong ioaddr = dev->base_addr;
52911 unsigned long flags;
52912 int i, regs[4];
52913 @@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
52914 {
52915 int csr5;
52916 struct pcnet32_private *lp = netdev_priv(dev);
52917 - struct pcnet32_access *a = &lp->a;
52918 + struct pcnet32_access *a = lp->a;
52919 ulong ioaddr = dev->base_addr;
52920 int ticks;
52921
52922 @@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
52923 spin_lock_irqsave(&lp->lock, flags);
52924 if (pcnet32_tx(dev)) {
52925 /* reset the chip to clear the error condition, then restart */
52926 - lp->a.reset(ioaddr);
52927 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52928 + lp->a->reset(ioaddr);
52929 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52930 pcnet32_restart(dev, CSR0_START);
52931 netif_wake_queue(dev);
52932 }
52933 @@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
52934 __napi_complete(napi);
52935
52936 /* clear interrupt masks */
52937 - val = lp->a.read_csr(ioaddr, CSR3);
52938 + val = lp->a->read_csr(ioaddr, CSR3);
52939 val &= 0x00ff;
52940 - lp->a.write_csr(ioaddr, CSR3, val);
52941 + lp->a->write_csr(ioaddr, CSR3, val);
52942
52943 /* Set interrupt enable. */
52944 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
52945 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
52946
52947 spin_unlock_irqrestore(&lp->lock, flags);
52948 }
52949 @@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
52950 int i, csr0;
52951 u16 *buff = ptr;
52952 struct pcnet32_private *lp = netdev_priv(dev);
52953 - struct pcnet32_access *a = &lp->a;
52954 + struct pcnet32_access *a = lp->a;
52955 ulong ioaddr = dev->base_addr;
52956 unsigned long flags;
52957
52958 @@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
52959 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
52960 if (lp->phymask & (1 << j)) {
52961 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
52962 - lp->a.write_bcr(ioaddr, 33,
52963 + lp->a->write_bcr(ioaddr, 33,
52964 (j << 5) | i);
52965 - *buff++ = lp->a.read_bcr(ioaddr, 34);
52966 + *buff++ = lp->a->read_bcr(ioaddr, 34);
52967 }
52968 }
52969 }
52970 @@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
52971 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
52972 lp->options |= PCNET32_PORT_FD;
52973
52974 - lp->a = *a;
52975 + lp->a = a;
52976
52977 /* prior to register_netdev, dev->name is not yet correct */
52978 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
52979 @@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
52980 if (lp->mii) {
52981 /* lp->phycount and lp->phymask are set to 0 by memset above */
52982
52983 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
52984 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
52985 /* scan for PHYs */
52986 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
52987 unsigned short id1, id2;
52988 @@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
52989 "Found PHY %04x:%04x at address %d.\n",
52990 id1, id2, i);
52991 }
52992 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
52993 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
52994 if (lp->phycount > 1) {
52995 lp->options |= PCNET32_PORT_MII;
52996 }
52997 @@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
52998 }
52999
53000 /* Reset the PCNET32 */
53001 - lp->a.reset(ioaddr);
53002 + lp->a->reset(ioaddr);
53003
53004 /* switch pcnet32 to 32bit mode */
53005 - lp->a.write_bcr(ioaddr, 20, 2);
53006 + lp->a->write_bcr(ioaddr, 20, 2);
53007
53008 if (netif_msg_ifup(lp))
53009 printk(KERN_DEBUG
53010 @@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
53011 (u32) (lp->init_dma_addr));
53012
53013 /* set/reset autoselect bit */
53014 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
53015 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
53016 if (lp->options & PCNET32_PORT_ASEL)
53017 val |= 2;
53018 - lp->a.write_bcr(ioaddr, 2, val);
53019 + lp->a->write_bcr(ioaddr, 2, val);
53020
53021 /* handle full duplex setting */
53022 if (lp->mii_if.full_duplex) {
53023 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
53024 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
53025 if (lp->options & PCNET32_PORT_FD) {
53026 val |= 1;
53027 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
53028 @@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
53029 if (lp->chip_version == 0x2627)
53030 val |= 3;
53031 }
53032 - lp->a.write_bcr(ioaddr, 9, val);
53033 + lp->a->write_bcr(ioaddr, 9, val);
53034 }
53035
53036 /* set/reset GPSI bit in test register */
53037 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
53038 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
53039 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
53040 val |= 0x10;
53041 - lp->a.write_csr(ioaddr, 124, val);
53042 + lp->a->write_csr(ioaddr, 124, val);
53043
53044 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
53045 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
53046 @@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
53047 * duplex, and/or enable auto negotiation, and clear DANAS
53048 */
53049 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
53050 - lp->a.write_bcr(ioaddr, 32,
53051 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
53052 + lp->a->write_bcr(ioaddr, 32,
53053 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
53054 /* disable Auto Negotiation, set 10Mpbs, HD */
53055 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
53056 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
53057 if (lp->options & PCNET32_PORT_FD)
53058 val |= 0x10;
53059 if (lp->options & PCNET32_PORT_100)
53060 val |= 0x08;
53061 - lp->a.write_bcr(ioaddr, 32, val);
53062 + lp->a->write_bcr(ioaddr, 32, val);
53063 } else {
53064 if (lp->options & PCNET32_PORT_ASEL) {
53065 - lp->a.write_bcr(ioaddr, 32,
53066 - lp->a.read_bcr(ioaddr,
53067 + lp->a->write_bcr(ioaddr, 32,
53068 + lp->a->read_bcr(ioaddr,
53069 32) | 0x0080);
53070 /* enable auto negotiate, setup, disable fd */
53071 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
53072 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
53073 val |= 0x20;
53074 - lp->a.write_bcr(ioaddr, 32, val);
53075 + lp->a->write_bcr(ioaddr, 32, val);
53076 }
53077 }
53078 } else {
53079 @@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
53080 * There is really no good other way to handle multiple PHYs
53081 * other than turning off all automatics
53082 */
53083 - val = lp->a.read_bcr(ioaddr, 2);
53084 - lp->a.write_bcr(ioaddr, 2, val & ~2);
53085 - val = lp->a.read_bcr(ioaddr, 32);
53086 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
53087 + val = lp->a->read_bcr(ioaddr, 2);
53088 + lp->a->write_bcr(ioaddr, 2, val & ~2);
53089 + val = lp->a->read_bcr(ioaddr, 32);
53090 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
53091
53092 if (!(lp->options & PCNET32_PORT_ASEL)) {
53093 /* setup ecmd */
53094 @@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
53095 ecmd.speed =
53096 lp->
53097 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
53098 - bcr9 = lp->a.read_bcr(ioaddr, 9);
53099 + bcr9 = lp->a->read_bcr(ioaddr, 9);
53100
53101 if (lp->options & PCNET32_PORT_FD) {
53102 ecmd.duplex = DUPLEX_FULL;
53103 @@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
53104 ecmd.duplex = DUPLEX_HALF;
53105 bcr9 |= ~(1 << 0);
53106 }
53107 - lp->a.write_bcr(ioaddr, 9, bcr9);
53108 + lp->a->write_bcr(ioaddr, 9, bcr9);
53109 }
53110
53111 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
53112 @@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
53113
53114 #ifdef DO_DXSUFLO
53115 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
53116 - val = lp->a.read_csr(ioaddr, CSR3);
53117 + val = lp->a->read_csr(ioaddr, CSR3);
53118 val |= 0x40;
53119 - lp->a.write_csr(ioaddr, CSR3, val);
53120 + lp->a->write_csr(ioaddr, CSR3, val);
53121 }
53122 #endif
53123
53124 @@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
53125 napi_enable(&lp->napi);
53126
53127 /* Re-initialize the PCNET32, and start it when done. */
53128 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
53129 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
53130 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
53131 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
53132
53133 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
53134 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
53135 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
53136 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
53137
53138 netif_start_queue(dev);
53139
53140 @@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
53141
53142 i = 0;
53143 while (i++ < 100)
53144 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
53145 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
53146 break;
53147 /*
53148 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
53149 * reports that doing so triggers a bug in the '974.
53150 */
53151 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
53152 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
53153
53154 if (netif_msg_ifup(lp))
53155 printk(KERN_DEBUG
53156 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
53157 dev->name, i,
53158 (u32) (lp->init_dma_addr),
53159 - lp->a.read_csr(ioaddr, CSR0));
53160 + lp->a->read_csr(ioaddr, CSR0));
53161
53162 spin_unlock_irqrestore(&lp->lock, flags);
53163
53164 @@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
53165 * Switch back to 16bit mode to avoid problems with dumb
53166 * DOS packet driver after a warm reboot
53167 */
53168 - lp->a.write_bcr(ioaddr, 20, 4);
53169 + lp->a->write_bcr(ioaddr, 20, 4);
53170
53171 err_free_irq:
53172 spin_unlock_irqrestore(&lp->lock, flags);
53173 @@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
53174
53175 /* wait for stop */
53176 for (i = 0; i < 100; i++)
53177 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
53178 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
53179 break;
53180
53181 if (i >= 100 && netif_msg_drv(lp))
53182 @@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
53183 return;
53184
53185 /* ReInit Ring */
53186 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
53187 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
53188 i = 0;
53189 while (i++ < 1000)
53190 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
53191 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
53192 break;
53193
53194 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
53195 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
53196 }
53197
53198 static void pcnet32_tx_timeout(struct net_device *dev)
53199 @@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
53200 if (pcnet32_debug & NETIF_MSG_DRV)
53201 printk(KERN_ERR
53202 "%s: transmit timed out, status %4.4x, resetting.\n",
53203 - dev->name, lp->a.read_csr(ioaddr, CSR0));
53204 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
53205 + dev->name, lp->a->read_csr(ioaddr, CSR0));
53206 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
53207 dev->stats.tx_errors++;
53208 if (netif_msg_tx_err(lp)) {
53209 int i;
53210 @@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
53211 if (netif_msg_tx_queued(lp)) {
53212 printk(KERN_DEBUG
53213 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
53214 - dev->name, lp->a.read_csr(ioaddr, CSR0));
53215 + dev->name, lp->a->read_csr(ioaddr, CSR0));
53216 }
53217
53218 /* Default status -- will not enable Successful-TxDone
53219 @@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
53220 dev->stats.tx_bytes += skb->len;
53221
53222 /* Trigger an immediate send poll. */
53223 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
53224 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
53225
53226 dev->trans_start = jiffies;
53227
53228 @@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
53229
53230 spin_lock(&lp->lock);
53231
53232 - csr0 = lp->a.read_csr(ioaddr, CSR0);
53233 + csr0 = lp->a->read_csr(ioaddr, CSR0);
53234 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
53235 if (csr0 == 0xffff) {
53236 break; /* PCMCIA remove happened */
53237 }
53238 /* Acknowledge all of the current interrupt sources ASAP. */
53239 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
53240 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
53241
53242 if (netif_msg_intr(lp))
53243 printk(KERN_DEBUG
53244 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
53245 - dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
53246 + dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
53247
53248 /* Log misc errors. */
53249 if (csr0 & 0x4000)
53250 @@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
53251 if (napi_schedule_prep(&lp->napi)) {
53252 u16 val;
53253 /* set interrupt masks */
53254 - val = lp->a.read_csr(ioaddr, CSR3);
53255 + val = lp->a->read_csr(ioaddr, CSR3);
53256 val |= 0x5f00;
53257 - lp->a.write_csr(ioaddr, CSR3, val);
53258 + lp->a->write_csr(ioaddr, CSR3, val);
53259
53260 __napi_schedule(&lp->napi);
53261 break;
53262 }
53263 - csr0 = lp->a.read_csr(ioaddr, CSR0);
53264 + csr0 = lp->a->read_csr(ioaddr, CSR0);
53265 }
53266
53267 if (netif_msg_intr(lp))
53268 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
53269 - dev->name, lp->a.read_csr(ioaddr, CSR0));
53270 + dev->name, lp->a->read_csr(ioaddr, CSR0));
53271
53272 spin_unlock(&lp->lock);
53273
53274 @@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
53275
53276 spin_lock_irqsave(&lp->lock, flags);
53277
53278 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
53279 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
53280
53281 if (netif_msg_ifdown(lp))
53282 printk(KERN_DEBUG
53283 "%s: Shutting down ethercard, status was %2.2x.\n",
53284 - dev->name, lp->a.read_csr(ioaddr, CSR0));
53285 + dev->name, lp->a->read_csr(ioaddr, CSR0));
53286
53287 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
53288 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
53289 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
53290
53291 /*
53292 * Switch back to 16bit mode to avoid problems with dumb
53293 * DOS packet driver after a warm reboot
53294 */
53295 - lp->a.write_bcr(ioaddr, 20, 4);
53296 + lp->a->write_bcr(ioaddr, 20, 4);
53297
53298 spin_unlock_irqrestore(&lp->lock, flags);
53299
53300 @@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
53301 unsigned long flags;
53302
53303 spin_lock_irqsave(&lp->lock, flags);
53304 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
53305 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
53306 spin_unlock_irqrestore(&lp->lock, flags);
53307
53308 return &dev->stats;
53309 @@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
53310 if (dev->flags & IFF_ALLMULTI) {
53311 ib->filter[0] = cpu_to_le32(~0U);
53312 ib->filter[1] = cpu_to_le32(~0U);
53313 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
53314 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
53315 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
53316 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
53317 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
53318 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
53319 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
53320 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
53321 return;
53322 }
53323 /* clear the multicast filter */
53324 @@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
53325 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
53326 }
53327 for (i = 0; i < 4; i++)
53328 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
53329 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
53330 le16_to_cpu(mcast_table[i]));
53331 return;
53332 }
53333 @@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
53334
53335 spin_lock_irqsave(&lp->lock, flags);
53336 suspended = pcnet32_suspend(dev, &flags, 0);
53337 - csr15 = lp->a.read_csr(ioaddr, CSR15);
53338 + csr15 = lp->a->read_csr(ioaddr, CSR15);
53339 if (dev->flags & IFF_PROMISC) {
53340 /* Log any net taps. */
53341 if (netif_msg_hw(lp))
53342 @@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
53343 lp->init_block->mode =
53344 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
53345 7);
53346 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
53347 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
53348 } else {
53349 lp->init_block->mode =
53350 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
53351 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
53352 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
53353 pcnet32_load_multicast(dev);
53354 }
53355
53356 if (suspended) {
53357 int csr5;
53358 /* clear SUSPEND (SPND) - CSR5 bit 0 */
53359 - csr5 = lp->a.read_csr(ioaddr, CSR5);
53360 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
53361 + csr5 = lp->a->read_csr(ioaddr, CSR5);
53362 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
53363 } else {
53364 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
53365 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
53366 pcnet32_restart(dev, CSR0_NORMAL);
53367 netif_wake_queue(dev);
53368 }
53369 @@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
53370 if (!lp->mii)
53371 return 0;
53372
53373 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
53374 - val_out = lp->a.read_bcr(ioaddr, 34);
53375 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
53376 + val_out = lp->a->read_bcr(ioaddr, 34);
53377
53378 return val_out;
53379 }
53380 @@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
53381 if (!lp->mii)
53382 return;
53383
53384 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
53385 - lp->a.write_bcr(ioaddr, 34, val);
53386 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
53387 + lp->a->write_bcr(ioaddr, 34, val);
53388 }
53389
53390 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
53391 @@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
53392 curr_link = mii_link_ok(&lp->mii_if);
53393 } else {
53394 ulong ioaddr = dev->base_addr; /* card base I/O address */
53395 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
53396 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
53397 }
53398 if (!curr_link) {
53399 if (prev_link || verbose) {
53400 @@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
53401 (ecmd.duplex ==
53402 DUPLEX_FULL) ? "full" : "half");
53403 }
53404 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
53405 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
53406 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
53407 if (lp->mii_if.full_duplex)
53408 bcr9 |= (1 << 0);
53409 else
53410 bcr9 &= ~(1 << 0);
53411 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
53412 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
53413 }
53414 } else {
53415 if (netif_msg_link(lp))
53416 diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
53417 index 7cc9898..6eb50d3 100644
53418 --- a/drivers/net/sis190.c
53419 +++ b/drivers/net/sis190.c
53420 @@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
53421 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
53422 struct net_device *dev)
53423 {
53424 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
53425 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
53426 struct sis190_private *tp = netdev_priv(dev);
53427 struct pci_dev *isa_bridge;
53428 u8 reg, tmp8;
53429 diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
53430 index e13685a..60c948c 100644
53431 --- a/drivers/net/sundance.c
53432 +++ b/drivers/net/sundance.c
53433 @@ -225,7 +225,7 @@ enum {
53434 struct pci_id_info {
53435 const char *name;
53436 };
53437 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
53438 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
53439 {"D-Link DFE-550TX FAST Ethernet Adapter"},
53440 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
53441 {"D-Link DFE-580TX 4 port Server Adapter"},
53442 diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
53443 index 529f55a..cccaa18 100644
53444 --- a/drivers/net/tg3.h
53445 +++ b/drivers/net/tg3.h
53446 @@ -95,6 +95,7 @@
53447 #define CHIPREV_ID_5750_A0 0x4000
53448 #define CHIPREV_ID_5750_A1 0x4001
53449 #define CHIPREV_ID_5750_A3 0x4003
53450 +#define CHIPREV_ID_5750_C1 0x4201
53451 #define CHIPREV_ID_5750_C2 0x4202
53452 #define CHIPREV_ID_5752_A0_HW 0x5000
53453 #define CHIPREV_ID_5752_A0 0x6000
53454 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
53455 index b9db1b5..720f9ce 100644
53456 --- a/drivers/net/tokenring/abyss.c
53457 +++ b/drivers/net/tokenring/abyss.c
53458 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
53459
53460 static int __init abyss_init (void)
53461 {
53462 - abyss_netdev_ops = tms380tr_netdev_ops;
53463 + pax_open_kernel();
53464 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
53465
53466 - abyss_netdev_ops.ndo_open = abyss_open;
53467 - abyss_netdev_ops.ndo_stop = abyss_close;
53468 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
53469 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
53470 + pax_close_kernel();
53471
53472 return pci_register_driver(&abyss_driver);
53473 }
53474 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
53475 index 456f8bf..373e56d 100644
53476 --- a/drivers/net/tokenring/madgemc.c
53477 +++ b/drivers/net/tokenring/madgemc.c
53478 @@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
53479
53480 static int __init madgemc_init (void)
53481 {
53482 - madgemc_netdev_ops = tms380tr_netdev_ops;
53483 - madgemc_netdev_ops.ndo_open = madgemc_open;
53484 - madgemc_netdev_ops.ndo_stop = madgemc_close;
53485 + pax_open_kernel();
53486 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
53487 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
53488 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
53489 + pax_close_kernel();
53490
53491 return mca_register_driver (&madgemc_driver);
53492 }
53493 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
53494 index 16e8783..925bd49 100644
53495 --- a/drivers/net/tokenring/proteon.c
53496 +++ b/drivers/net/tokenring/proteon.c
53497 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
53498 struct platform_device *pdev;
53499 int i, num = 0, err = 0;
53500
53501 - proteon_netdev_ops = tms380tr_netdev_ops;
53502 - proteon_netdev_ops.ndo_open = proteon_open;
53503 - proteon_netdev_ops.ndo_stop = tms380tr_close;
53504 + pax_open_kernel();
53505 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
53506 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
53507 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
53508 + pax_close_kernel();
53509
53510 err = platform_driver_register(&proteon_driver);
53511 if (err)
53512 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
53513 index 46db5c5..37c1536 100644
53514 --- a/drivers/net/tokenring/skisa.c
53515 +++ b/drivers/net/tokenring/skisa.c
53516 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
53517 struct platform_device *pdev;
53518 int i, num = 0, err = 0;
53519
53520 - sk_isa_netdev_ops = tms380tr_netdev_ops;
53521 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
53522 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
53523 + pax_open_kernel();
53524 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
53525 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
53526 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
53527 + pax_close_kernel();
53528
53529 err = platform_driver_register(&sk_isa_driver);
53530 if (err)
53531 diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
53532 index 74e5ba4..5cf6bc9 100644
53533 --- a/drivers/net/tulip/de2104x.c
53534 +++ b/drivers/net/tulip/de2104x.c
53535 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
53536 struct de_srom_info_leaf *il;
53537 void *bufp;
53538
53539 + pax_track_stack();
53540 +
53541 /* download entire eeprom */
53542 for (i = 0; i < DE_EEPROM_WORDS; i++)
53543 ((__le16 *)ee_data)[i] =
53544 diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
53545 index a8349b7..90f9dfe 100644
53546 --- a/drivers/net/tulip/de4x5.c
53547 +++ b/drivers/net/tulip/de4x5.c
53548 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
53549 for (i=0; i<ETH_ALEN; i++) {
53550 tmp.addr[i] = dev->dev_addr[i];
53551 }
53552 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
53553 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
53554 break;
53555
53556 case DE4X5_SET_HWADDR: /* Set the hardware address */
53557 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
53558 spin_lock_irqsave(&lp->lock, flags);
53559 memcpy(&statbuf, &lp->pktStats, ioc->len);
53560 spin_unlock_irqrestore(&lp->lock, flags);
53561 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
53562 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
53563 return -EFAULT;
53564 break;
53565 }
53566 diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
53567 index 391acd3..56d11cd 100644
53568 --- a/drivers/net/tulip/eeprom.c
53569 +++ b/drivers/net/tulip/eeprom.c
53570 @@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
53571 {NULL}};
53572
53573
53574 -static const char *block_name[] __devinitdata = {
53575 +static const char *block_name[] __devinitconst = {
53576 "21140 non-MII",
53577 "21140 MII PHY",
53578 "21142 Serial PHY",
53579 diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
53580 index b38d3b7..b1cff23 100644
53581 --- a/drivers/net/tulip/winbond-840.c
53582 +++ b/drivers/net/tulip/winbond-840.c
53583 @@ -235,7 +235,7 @@ struct pci_id_info {
53584 int drv_flags; /* Driver use, intended as capability flags. */
53585 };
53586
53587 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
53588 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
53589 { /* Sometime a Level-One switch card. */
53590 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
53591 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
53592 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
53593 index f450bc9..2b747c8 100644
53594 --- a/drivers/net/usb/hso.c
53595 +++ b/drivers/net/usb/hso.c
53596 @@ -71,7 +71,7 @@
53597 #include <asm/byteorder.h>
53598 #include <linux/serial_core.h>
53599 #include <linux/serial.h>
53600 -
53601 +#include <asm/local.h>
53602
53603 #define DRIVER_VERSION "1.2"
53604 #define MOD_AUTHOR "Option Wireless"
53605 @@ -258,7 +258,7 @@ struct hso_serial {
53606
53607 /* from usb_serial_port */
53608 struct tty_struct *tty;
53609 - int open_count;
53610 + local_t open_count;
53611 spinlock_t serial_lock;
53612
53613 int (*write_data) (struct hso_serial *serial);
53614 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
53615 struct urb *urb;
53616
53617 urb = serial->rx_urb[0];
53618 - if (serial->open_count > 0) {
53619 + if (local_read(&serial->open_count) > 0) {
53620 count = put_rxbuf_data(urb, serial);
53621 if (count == -1)
53622 return;
53623 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
53624 DUMP1(urb->transfer_buffer, urb->actual_length);
53625
53626 /* Anyone listening? */
53627 - if (serial->open_count == 0)
53628 + if (local_read(&serial->open_count) == 0)
53629 return;
53630
53631 if (status == 0) {
53632 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
53633 spin_unlock_irq(&serial->serial_lock);
53634
53635 /* check for port already opened, if not set the termios */
53636 - serial->open_count++;
53637 - if (serial->open_count == 1) {
53638 + if (local_inc_return(&serial->open_count) == 1) {
53639 tty->low_latency = 1;
53640 serial->rx_state = RX_IDLE;
53641 /* Force default termio settings */
53642 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
53643 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
53644 if (result) {
53645 hso_stop_serial_device(serial->parent);
53646 - serial->open_count--;
53647 + local_dec(&serial->open_count);
53648 kref_put(&serial->parent->ref, hso_serial_ref_free);
53649 }
53650 } else {
53651 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
53652
53653 /* reset the rts and dtr */
53654 /* do the actual close */
53655 - serial->open_count--;
53656 + local_dec(&serial->open_count);
53657
53658 - if (serial->open_count <= 0) {
53659 - serial->open_count = 0;
53660 + if (local_read(&serial->open_count) <= 0) {
53661 + local_set(&serial->open_count, 0);
53662 spin_lock_irq(&serial->serial_lock);
53663 if (serial->tty == tty) {
53664 serial->tty->driver_data = NULL;
53665 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
53666
53667 /* the actual setup */
53668 spin_lock_irqsave(&serial->serial_lock, flags);
53669 - if (serial->open_count)
53670 + if (local_read(&serial->open_count))
53671 _hso_serial_set_termios(tty, old);
53672 else
53673 tty->termios = old;
53674 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
53675 /* Start all serial ports */
53676 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
53677 if (serial_table[i] && (serial_table[i]->interface == iface)) {
53678 - if (dev2ser(serial_table[i])->open_count) {
53679 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
53680 result =
53681 hso_start_serial_device(serial_table[i], GFP_NOIO);
53682 hso_kick_transmit(dev2ser(serial_table[i]));
53683 diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
53684 index 3e94f0c..ffdd926 100644
53685 --- a/drivers/net/vxge/vxge-config.h
53686 +++ b/drivers/net/vxge/vxge-config.h
53687 @@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
53688 void (*link_down)(struct __vxge_hw_device *devh);
53689 void (*crit_err)(struct __vxge_hw_device *devh,
53690 enum vxge_hw_event type, u64 ext_data);
53691 -};
53692 +} __no_const;
53693
53694 /*
53695 * struct __vxge_hw_blockpool_entry - Block private data structure
53696 diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
53697 index 068d7a9..35293de 100644
53698 --- a/drivers/net/vxge/vxge-main.c
53699 +++ b/drivers/net/vxge/vxge-main.c
53700 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
53701 struct sk_buff *completed[NR_SKB_COMPLETED];
53702 int more;
53703
53704 + pax_track_stack();
53705 +
53706 do {
53707 more = 0;
53708 skb_ptr = completed;
53709 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
53710 u8 mtable[256] = {0}; /* CPU to vpath mapping */
53711 int index;
53712
53713 + pax_track_stack();
53714 +
53715 /*
53716 * Filling
53717 * - itable with bucket numbers
53718 diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
53719 index 461742b..81be42e 100644
53720 --- a/drivers/net/vxge/vxge-traffic.h
53721 +++ b/drivers/net/vxge/vxge-traffic.h
53722 @@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
53723 struct vxge_hw_mempool_dma *dma_object,
53724 u32 index,
53725 u32 is_last);
53726 -};
53727 +} __no_const;
53728
53729 void
53730 __vxge_hw_mempool_destroy(
53731 diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
53732 index cd8cb95..4153b79 100644
53733 --- a/drivers/net/wan/cycx_x25.c
53734 +++ b/drivers/net/wan/cycx_x25.c
53735 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
53736 unsigned char hex[1024],
53737 * phex = hex;
53738
53739 + pax_track_stack();
53740 +
53741 if (len >= (sizeof(hex) / 2))
53742 len = (sizeof(hex) / 2) - 1;
53743
53744 diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
53745 index aa9248f..a4e3c3b 100644
53746 --- a/drivers/net/wan/hdlc_x25.c
53747 +++ b/drivers/net/wan/hdlc_x25.c
53748 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
53749
53750 static int x25_open(struct net_device *dev)
53751 {
53752 - struct lapb_register_struct cb;
53753 + static struct lapb_register_struct cb = {
53754 + .connect_confirmation = x25_connected,
53755 + .connect_indication = x25_connected,
53756 + .disconnect_confirmation = x25_disconnected,
53757 + .disconnect_indication = x25_disconnected,
53758 + .data_indication = x25_data_indication,
53759 + .data_transmit = x25_data_transmit
53760 + };
53761 int result;
53762
53763 - cb.connect_confirmation = x25_connected;
53764 - cb.connect_indication = x25_connected;
53765 - cb.disconnect_confirmation = x25_disconnected;
53766 - cb.disconnect_indication = x25_disconnected;
53767 - cb.data_indication = x25_data_indication;
53768 - cb.data_transmit = x25_data_transmit;
53769 -
53770 result = lapb_register(dev, &cb);
53771 if (result != LAPB_OK)
53772 return result;
53773 diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
53774 index 5ad287c..783b020 100644
53775 --- a/drivers/net/wimax/i2400m/usb-fw.c
53776 +++ b/drivers/net/wimax/i2400m/usb-fw.c
53777 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
53778 int do_autopm = 1;
53779 DECLARE_COMPLETION_ONSTACK(notif_completion);
53780
53781 + pax_track_stack();
53782 +
53783 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
53784 i2400m, ack, ack_size);
53785 BUG_ON(_ack == i2400m->bm_ack_buf);
53786 diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
53787 index 6c26840..62c97c3 100644
53788 --- a/drivers/net/wireless/airo.c
53789 +++ b/drivers/net/wireless/airo.c
53790 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
53791 BSSListElement * loop_net;
53792 BSSListElement * tmp_net;
53793
53794 + pax_track_stack();
53795 +
53796 /* Blow away current list of scan results */
53797 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
53798 list_move_tail (&loop_net->list, &ai->network_free_list);
53799 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
53800 WepKeyRid wkr;
53801 int rc;
53802
53803 + pax_track_stack();
53804 +
53805 memset( &mySsid, 0, sizeof( mySsid ) );
53806 kfree (ai->flash);
53807 ai->flash = NULL;
53808 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
53809 __le32 *vals = stats.vals;
53810 int len;
53811
53812 + pax_track_stack();
53813 +
53814 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
53815 return -ENOMEM;
53816 data = (struct proc_data *)file->private_data;
53817 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
53818 /* If doLoseSync is not 1, we won't do a Lose Sync */
53819 int doLoseSync = -1;
53820
53821 + pax_track_stack();
53822 +
53823 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
53824 return -ENOMEM;
53825 data = (struct proc_data *)file->private_data;
53826 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
53827 int i;
53828 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
53829
53830 + pax_track_stack();
53831 +
53832 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
53833 if (!qual)
53834 return -ENOMEM;
53835 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
53836 CapabilityRid cap_rid;
53837 __le32 *vals = stats_rid.vals;
53838
53839 + pax_track_stack();
53840 +
53841 /* Get stats out of the card */
53842 clear_bit(JOB_WSTATS, &local->jobs);
53843 if (local->power.event) {
53844 diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
53845 index 747508c..c36cb08 100644
53846 --- a/drivers/net/wireless/ath/ath5k/debug.c
53847 +++ b/drivers/net/wireless/ath/ath5k/debug.c
53848 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
53849 unsigned int v;
53850 u64 tsf;
53851
53852 + pax_track_stack();
53853 +
53854 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
53855 len += snprintf(buf+len, sizeof(buf)-len,
53856 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
53857 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
53858 unsigned int len = 0;
53859 unsigned int i;
53860
53861 + pax_track_stack();
53862 +
53863 len += snprintf(buf+len, sizeof(buf)-len,
53864 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
53865
53866 @@ -337,6 +341,9 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
53867
53868 static ssize_t write_file_debug(struct file *file,
53869 const char __user *userbuf,
53870 + size_t count, loff_t *ppos) __size_overflow(3);
53871 +static ssize_t write_file_debug(struct file *file,
53872 + const char __user *userbuf,
53873 size_t count, loff_t *ppos)
53874 {
53875 struct ath5k_softc *sc = file->private_data;
53876 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
53877 index 2be4c22..a8ad784 100644
53878 --- a/drivers/net/wireless/ath/ath9k/debug.c
53879 +++ b/drivers/net/wireless/ath/ath9k/debug.c
53880 @@ -56,6 +56,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
53881 }
53882
53883 static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
53884 + size_t count, loff_t *ppos) __size_overflow(3);
53885 +static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
53886 size_t count, loff_t *ppos)
53887 {
53888 struct ath_softc *sc = file->private_data;
53889 @@ -220,6 +222,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
53890 char buf[512];
53891 unsigned int len = 0;
53892
53893 + pax_track_stack();
53894 +
53895 len += snprintf(buf + len, sizeof(buf) - len,
53896 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
53897 len += snprintf(buf + len, sizeof(buf) - len,
53898 @@ -360,6 +364,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
53899 int i;
53900 u8 addr[ETH_ALEN];
53901
53902 + pax_track_stack();
53903 +
53904 len += snprintf(buf + len, sizeof(buf) - len,
53905 "primary: %s (%s chan=%d ht=%d)\n",
53906 wiphy_name(sc->pri_wiphy->hw->wiphy),
53907 diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
53908 index 80b19a4..dab3a45 100644
53909 --- a/drivers/net/wireless/b43/debugfs.c
53910 +++ b/drivers/net/wireless/b43/debugfs.c
53911 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
53912 struct b43_debugfs_fops {
53913 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
53914 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
53915 - struct file_operations fops;
53916 + const struct file_operations fops;
53917 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
53918 size_t file_struct_offset;
53919 };
53920 diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
53921 index 1f85ac5..c99b4b4 100644
53922 --- a/drivers/net/wireless/b43legacy/debugfs.c
53923 +++ b/drivers/net/wireless/b43legacy/debugfs.c
53924 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
53925 struct b43legacy_debugfs_fops {
53926 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
53927 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
53928 - struct file_operations fops;
53929 + const struct file_operations fops;
53930 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
53931 size_t file_struct_offset;
53932 /* Take wl->irq_lock before calling read/write? */
53933 diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
53934 index 43102bf..3b569c3 100644
53935 --- a/drivers/net/wireless/ipw2x00/ipw2100.c
53936 +++ b/drivers/net/wireless/ipw2x00/ipw2100.c
53937 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
53938 int err;
53939 DECLARE_SSID_BUF(ssid);
53940
53941 + pax_track_stack();
53942 +
53943 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
53944
53945 if (ssid_len)
53946 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
53947 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
53948 int err;
53949
53950 + pax_track_stack();
53951 +
53952 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
53953 idx, keylen, len);
53954
53955 diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
53956 index 282b1f7..169f0cf 100644
53957 --- a/drivers/net/wireless/ipw2x00/libipw_rx.c
53958 +++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
53959 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
53960 unsigned long flags;
53961 DECLARE_SSID_BUF(ssid);
53962
53963 + pax_track_stack();
53964 +
53965 LIBIPW_DEBUG_SCAN("'%s' (%pM"
53966 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
53967 print_ssid(ssid, info_element->data, info_element->len),
53968 diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
53969 index 950267a..80d5fd2 100644
53970 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c
53971 +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
53972 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
53973 },
53974 };
53975
53976 -static struct iwl_ops iwl1000_ops = {
53977 +static const struct iwl_ops iwl1000_ops = {
53978 .ucode = &iwl5000_ucode,
53979 .lib = &iwl1000_lib,
53980 .hcmd = &iwl5000_hcmd,
53981 diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
53982 index 56bfcc3..b348020 100644
53983 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c
53984 +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
53985 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
53986 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
53987 };
53988
53989 -static struct iwl_ops iwl3945_ops = {
53990 +static const struct iwl_ops iwl3945_ops = {
53991 .ucode = &iwl3945_ucode,
53992 .lib = &iwl3945_lib,
53993 .hcmd = &iwl3945_hcmd,
53994 diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
53995 index 585b8d4..e142963 100644
53996 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c
53997 +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
53998 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
53999 },
54000 };
54001
54002 -static struct iwl_ops iwl4965_ops = {
54003 +static const struct iwl_ops iwl4965_ops = {
54004 .ucode = &iwl4965_ucode,
54005 .lib = &iwl4965_lib,
54006 .hcmd = &iwl4965_hcmd,
54007 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
54008 index 1f423f2..e37c192 100644
54009 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c
54010 +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
54011 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
54012 },
54013 };
54014
54015 -struct iwl_ops iwl5000_ops = {
54016 +const struct iwl_ops iwl5000_ops = {
54017 .ucode = &iwl5000_ucode,
54018 .lib = &iwl5000_lib,
54019 .hcmd = &iwl5000_hcmd,
54020 .utils = &iwl5000_hcmd_utils,
54021 };
54022
54023 -static struct iwl_ops iwl5150_ops = {
54024 +static const struct iwl_ops iwl5150_ops = {
54025 .ucode = &iwl5000_ucode,
54026 .lib = &iwl5150_lib,
54027 .hcmd = &iwl5000_hcmd,
54028 diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
54029 index 1473452..f07d5e1 100644
54030 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c
54031 +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
54032 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
54033 .calc_rssi = iwl5000_calc_rssi,
54034 };
54035
54036 -static struct iwl_ops iwl6000_ops = {
54037 +static const struct iwl_ops iwl6000_ops = {
54038 .ucode = &iwl5000_ucode,
54039 .lib = &iwl6000_lib,
54040 .hcmd = &iwl5000_hcmd,
54041 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
54042 index 1a3dfa2..b3e0a61 100644
54043 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
54044 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
54045 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
54046 u8 active_index = 0;
54047 s32 tpt = 0;
54048
54049 + pax_track_stack();
54050 +
54051 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
54052
54053 if (!ieee80211_is_data(hdr->frame_control) ||
54054 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
54055 u8 valid_tx_ant = 0;
54056 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
54057
54058 + pax_track_stack();
54059 +
54060 /* Override starting rate (index 0) if needed for debug purposes */
54061 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
54062
54063 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
54064 index 0e56d78..6a3c107 100644
54065 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
54066 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
54067 @@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
54068 if (iwl_debug_level & IWL_DL_INFO)
54069 dev_printk(KERN_DEBUG, &(pdev->dev),
54070 "Disabling hw_scan\n");
54071 - iwl_hw_ops.hw_scan = NULL;
54072 + pax_open_kernel();
54073 + *(void **)&iwl_hw_ops.hw_scan = NULL;
54074 + pax_close_kernel();
54075 }
54076
54077 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
54078 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
54079 index cbc6290..eb323d7 100644
54080 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
54081 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
54082 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
54083 #endif
54084
54085 #else
54086 -#define IWL_DEBUG(__priv, level, fmt, args...)
54087 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
54088 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
54089 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
54090 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
54091 void *p, u32 len)
54092 {}
54093 diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
54094 index a198bcf..8e68233 100644
54095 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
54096 +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
54097 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
54098 int pos = 0;
54099 const size_t bufsz = sizeof(buf);
54100
54101 + pax_track_stack();
54102 +
54103 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
54104 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
54105 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
54106 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
54107 const size_t bufsz = sizeof(buf);
54108 ssize_t ret;
54109
54110 + pax_track_stack();
54111 +
54112 for (i = 0; i < AC_NUM; i++) {
54113 pos += scnprintf(buf + pos, bufsz - pos,
54114 "\tcw_min\tcw_max\taifsn\ttxop\n");
54115 diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
54116 index 3539ea4..b174bfa 100644
54117 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h
54118 +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
54119 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
54120
54121 /* shared structures from iwl-5000.c */
54122 extern struct iwl_mod_params iwl50_mod_params;
54123 -extern struct iwl_ops iwl5000_ops;
54124 +extern const struct iwl_ops iwl5000_ops;
54125 extern struct iwl_ucode_ops iwl5000_ucode;
54126 extern struct iwl_lib_ops iwl5000_lib;
54127 extern struct iwl_hcmd_ops iwl5000_hcmd;
54128 diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
54129 index 619590d..69235ee 100644
54130 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
54131 +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
54132 @@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
54133 */
54134 if (iwl3945_mod_params.disable_hw_scan) {
54135 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
54136 - iwl3945_hw_ops.hw_scan = NULL;
54137 + pax_open_kernel();
54138 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
54139 + pax_close_kernel();
54140 }
54141
54142
54143 diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
54144 index 1465379..fe4d78b 100644
54145 --- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
54146 +++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
54147 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
54148 int buf_len = 512;
54149 size_t len = 0;
54150
54151 + pax_track_stack();
54152 +
54153 if (*ppos != 0)
54154 return 0;
54155 if (count < sizeof(buf))
54156 diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
54157 index 893a55c..7f66a50 100644
54158 --- a/drivers/net/wireless/libertas/debugfs.c
54159 +++ b/drivers/net/wireless/libertas/debugfs.c
54160 @@ -708,7 +708,7 @@ out_unlock:
54161 struct lbs_debugfs_files {
54162 const char *name;
54163 int perm;
54164 - struct file_operations fops;
54165 + const struct file_operations fops;
54166 };
54167
54168 static const struct lbs_debugfs_files debugfs_files[] = {
54169 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
54170 index 2ecbedb..42704f0 100644
54171 --- a/drivers/net/wireless/rndis_wlan.c
54172 +++ b/drivers/net/wireless/rndis_wlan.c
54173 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
54174
54175 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
54176
54177 - if (rts_threshold < 0 || rts_threshold > 2347)
54178 + if (rts_threshold > 2347)
54179 rts_threshold = 2347;
54180
54181 tmp = cpu_to_le32(rts_threshold);
54182 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
54183 index 334ccd6..47f8944 100644
54184 --- a/drivers/oprofile/buffer_sync.c
54185 +++ b/drivers/oprofile/buffer_sync.c
54186 @@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
54187 if (cookie == NO_COOKIE)
54188 offset = pc;
54189 if (cookie == INVALID_COOKIE) {
54190 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
54191 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
54192 offset = pc;
54193 }
54194 if (cookie != last_cookie) {
54195 @@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
54196 /* add userspace sample */
54197
54198 if (!mm) {
54199 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
54200 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
54201 return 0;
54202 }
54203
54204 cookie = lookup_dcookie(mm, s->eip, &offset);
54205
54206 if (cookie == INVALID_COOKIE) {
54207 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
54208 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
54209 return 0;
54210 }
54211
54212 @@ -562,7 +562,7 @@ void sync_buffer(int cpu)
54213 /* ignore backtraces if failed to add a sample */
54214 if (state == sb_bt_start) {
54215 state = sb_bt_ignore;
54216 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
54217 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
54218 }
54219 }
54220 release_mm(mm);
54221 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
54222 index 5df60a6..72f5c1c 100644
54223 --- a/drivers/oprofile/event_buffer.c
54224 +++ b/drivers/oprofile/event_buffer.c
54225 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
54226 }
54227
54228 if (buffer_pos == buffer_size) {
54229 - atomic_inc(&oprofile_stats.event_lost_overflow);
54230 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
54231 return;
54232 }
54233
54234 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
54235 index dc8a042..fe5f315 100644
54236 --- a/drivers/oprofile/oprof.c
54237 +++ b/drivers/oprofile/oprof.c
54238 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
54239 if (oprofile_ops.switch_events())
54240 return;
54241
54242 - atomic_inc(&oprofile_stats.multiplex_counter);
54243 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
54244 start_switch_worker();
54245 }
54246
54247 diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
54248 index bbd7516..1f97f55 100644
54249 --- a/drivers/oprofile/oprofile_files.c
54250 +++ b/drivers/oprofile/oprofile_files.c
54251 @@ -36,6 +36,8 @@ static ssize_t timeout_read(struct file *file, char __user *buf,
54252
54253
54254 static ssize_t timeout_write(struct file *file, char const __user *buf,
54255 + size_t count, loff_t *offset) __size_overflow(3);
54256 +static ssize_t timeout_write(struct file *file, char const __user *buf,
54257 size_t count, loff_t *offset)
54258 {
54259 unsigned long val;
54260 @@ -71,6 +73,7 @@ static ssize_t depth_read(struct file *file, char __user *buf, size_t count, lof
54261 }
54262
54263
54264 +static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
54265 static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
54266 {
54267 unsigned long val;
54268 @@ -119,12 +122,14 @@ static const struct file_operations cpu_type_fops = {
54269 };
54270
54271
54272 +static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset) __size_overflow(3);
54273 static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
54274 {
54275 return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
54276 }
54277
54278
54279 +static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
54280 static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
54281 {
54282 unsigned long val;
54283 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
54284 index 61689e8..387f7f8 100644
54285 --- a/drivers/oprofile/oprofile_stats.c
54286 +++ b/drivers/oprofile/oprofile_stats.c
54287 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
54288 cpu_buf->sample_invalid_eip = 0;
54289 }
54290
54291 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
54292 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
54293 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
54294 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
54295 - atomic_set(&oprofile_stats.multiplex_counter, 0);
54296 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
54297 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
54298 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
54299 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
54300 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
54301 }
54302
54303
54304 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
54305 index 0b54e46..a37c527 100644
54306 --- a/drivers/oprofile/oprofile_stats.h
54307 +++ b/drivers/oprofile/oprofile_stats.h
54308 @@ -13,11 +13,11 @@
54309 #include <asm/atomic.h>
54310
54311 struct oprofile_stat_struct {
54312 - atomic_t sample_lost_no_mm;
54313 - atomic_t sample_lost_no_mapping;
54314 - atomic_t bt_lost_no_mapping;
54315 - atomic_t event_lost_overflow;
54316 - atomic_t multiplex_counter;
54317 + atomic_unchecked_t sample_lost_no_mm;
54318 + atomic_unchecked_t sample_lost_no_mapping;
54319 + atomic_unchecked_t bt_lost_no_mapping;
54320 + atomic_unchecked_t event_lost_overflow;
54321 + atomic_unchecked_t multiplex_counter;
54322 };
54323
54324 extern struct oprofile_stat_struct oprofile_stats;
54325 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
54326 index 2766a6d..4d533c7 100644
54327 --- a/drivers/oprofile/oprofilefs.c
54328 +++ b/drivers/oprofile/oprofilefs.c
54329 @@ -89,6 +89,7 @@ static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count
54330 }
54331
54332
54333 +static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
54334 static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
54335 {
54336 unsigned long *value = file->private_data;
54337 @@ -187,7 +188,7 @@ static const struct file_operations atomic_ro_fops = {
54338
54339
54340 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
54341 - char const *name, atomic_t *val)
54342 + char const *name, atomic_unchecked_t *val)
54343 {
54344 struct dentry *d = __oprofilefs_create_file(sb, root, name,
54345 &atomic_ro_fops, 0444);
54346 diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
54347 index 13a64bc..ad62835 100644
54348 --- a/drivers/parisc/pdc_stable.c
54349 +++ b/drivers/parisc/pdc_stable.c
54350 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
54351 return ret;
54352 }
54353
54354 -static struct sysfs_ops pdcspath_attr_ops = {
54355 +static const struct sysfs_ops pdcspath_attr_ops = {
54356 .show = pdcspath_attr_show,
54357 .store = pdcspath_attr_store,
54358 };
54359 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
54360 index 8eefe56..40751a7 100644
54361 --- a/drivers/parport/procfs.c
54362 +++ b/drivers/parport/procfs.c
54363 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
54364
54365 *ppos += len;
54366
54367 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
54368 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
54369 }
54370
54371 #ifdef CONFIG_PARPORT_1284
54372 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
54373
54374 *ppos += len;
54375
54376 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
54377 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
54378 }
54379 #endif /* IEEE1284.3 support. */
54380
54381 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
54382 index 73e7d8e..c80f3d2 100644
54383 --- a/drivers/pci/hotplug/acpiphp_glue.c
54384 +++ b/drivers/pci/hotplug/acpiphp_glue.c
54385 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
54386 }
54387
54388
54389 -static struct acpi_dock_ops acpiphp_dock_ops = {
54390 +static const struct acpi_dock_ops acpiphp_dock_ops = {
54391 .handler = handle_hotplug_event_func,
54392 };
54393
54394 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
54395 index 9fff878..ad0ad53 100644
54396 --- a/drivers/pci/hotplug/cpci_hotplug.h
54397 +++ b/drivers/pci/hotplug/cpci_hotplug.h
54398 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
54399 int (*hardware_test) (struct slot* slot, u32 value);
54400 u8 (*get_power) (struct slot* slot);
54401 int (*set_power) (struct slot* slot, int value);
54402 -};
54403 +} __no_const;
54404
54405 struct cpci_hp_controller {
54406 unsigned int irq;
54407 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
54408 index 76ba8a1..20ca857 100644
54409 --- a/drivers/pci/hotplug/cpqphp_nvram.c
54410 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
54411 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
54412
54413 void compaq_nvram_init (void __iomem *rom_start)
54414 {
54415 +
54416 +#ifndef CONFIG_PAX_KERNEXEC
54417 if (rom_start) {
54418 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
54419 }
54420 +#endif
54421 +
54422 dbg("int15 entry = %p\n", compaq_int15_entry_point);
54423
54424 /* initialize our int15 lock */
54425 diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
54426 index 6151389..0a894ef 100644
54427 --- a/drivers/pci/hotplug/fakephp.c
54428 +++ b/drivers/pci/hotplug/fakephp.c
54429 @@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
54430 }
54431
54432 static struct kobj_type legacy_ktype = {
54433 - .sysfs_ops = &(struct sysfs_ops){
54434 + .sysfs_ops = &(const struct sysfs_ops){
54435 .store = legacy_store, .show = legacy_show
54436 },
54437 .release = &legacy_release,
54438 diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
54439 index 5b680df..fe05b7e 100644
54440 --- a/drivers/pci/intel-iommu.c
54441 +++ b/drivers/pci/intel-iommu.c
54442 @@ -2643,7 +2643,7 @@ error:
54443 return 0;
54444 }
54445
54446 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
54447 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
54448 unsigned long offset, size_t size,
54449 enum dma_data_direction dir,
54450 struct dma_attrs *attrs)
54451 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
54452 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
54453 }
54454
54455 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
54456 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
54457 size_t size, enum dma_data_direction dir,
54458 struct dma_attrs *attrs)
54459 {
54460 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
54461 }
54462 }
54463
54464 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
54465 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
54466 dma_addr_t *dma_handle, gfp_t flags)
54467 {
54468 void *vaddr;
54469 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
54470 return NULL;
54471 }
54472
54473 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
54474 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
54475 dma_addr_t dma_handle)
54476 {
54477 int order;
54478 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
54479 free_pages((unsigned long)vaddr, order);
54480 }
54481
54482 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
54483 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
54484 int nelems, enum dma_data_direction dir,
54485 struct dma_attrs *attrs)
54486 {
54487 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
54488 return nelems;
54489 }
54490
54491 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
54492 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
54493 enum dma_data_direction dir, struct dma_attrs *attrs)
54494 {
54495 int i;
54496 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
54497 return nelems;
54498 }
54499
54500 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
54501 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
54502 {
54503 return !dma_addr;
54504 }
54505
54506 -struct dma_map_ops intel_dma_ops = {
54507 +const struct dma_map_ops intel_dma_ops = {
54508 .alloc_coherent = intel_alloc_coherent,
54509 .free_coherent = intel_free_coherent,
54510 .map_sg = intel_map_sg,
54511 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
54512 index 5b7056c..607bc94 100644
54513 --- a/drivers/pci/pcie/aspm.c
54514 +++ b/drivers/pci/pcie/aspm.c
54515 @@ -27,9 +27,9 @@
54516 #define MODULE_PARAM_PREFIX "pcie_aspm."
54517
54518 /* Note: those are not register definitions */
54519 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
54520 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
54521 -#define ASPM_STATE_L1 (4) /* L1 state */
54522 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
54523 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
54524 +#define ASPM_STATE_L1 (4U) /* L1 state */
54525 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
54526 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
54527
54528 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
54529 index 8105e32..ca10419 100644
54530 --- a/drivers/pci/probe.c
54531 +++ b/drivers/pci/probe.c
54532 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
54533 return ret;
54534 }
54535
54536 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
54537 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
54538 struct device_attribute *attr,
54539 char *buf)
54540 {
54541 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
54542 }
54543
54544 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
54545 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
54546 struct device_attribute *attr,
54547 char *buf)
54548 {
54549 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
54550 index a03ad8c..024b0da 100644
54551 --- a/drivers/pci/proc.c
54552 +++ b/drivers/pci/proc.c
54553 @@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
54554 static int __init pci_proc_init(void)
54555 {
54556 struct pci_dev *dev = NULL;
54557 +
54558 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
54559 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54560 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
54561 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54562 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
54563 +#endif
54564 +#else
54565 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
54566 +#endif
54567 proc_create("devices", 0, proc_bus_pci_dir,
54568 &proc_bus_pci_dev_operations);
54569 proc_initialized = 1;
54570 diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
54571 index 8c02b6c..5584d8e 100644
54572 --- a/drivers/pci/slot.c
54573 +++ b/drivers/pci/slot.c
54574 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
54575 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
54576 }
54577
54578 -static struct sysfs_ops pci_slot_sysfs_ops = {
54579 +static const struct sysfs_ops pci_slot_sysfs_ops = {
54580 .show = pci_slot_attr_show,
54581 .store = pci_slot_attr_store,
54582 };
54583 diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
54584 index 30cf71d2..50938f1 100644
54585 --- a/drivers/pcmcia/pcmcia_ioctl.c
54586 +++ b/drivers/pcmcia/pcmcia_ioctl.c
54587 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
54588 return -EFAULT;
54589 }
54590 }
54591 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
54592 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
54593 if (!buf)
54594 return -ENOMEM;
54595
54596 diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
54597 index 52183c4..b224c69 100644
54598 --- a/drivers/platform/x86/acer-wmi.c
54599 +++ b/drivers/platform/x86/acer-wmi.c
54600 @@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
54601 return 0;
54602 }
54603
54604 -static struct backlight_ops acer_bl_ops = {
54605 +static const struct backlight_ops acer_bl_ops = {
54606 .get_brightness = read_brightness,
54607 .update_status = update_bl_status,
54608 };
54609 diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
54610 index 767cb61..a87380b 100644
54611 --- a/drivers/platform/x86/asus-laptop.c
54612 +++ b/drivers/platform/x86/asus-laptop.c
54613 @@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
54614 */
54615 static int read_brightness(struct backlight_device *bd);
54616 static int update_bl_status(struct backlight_device *bd);
54617 -static struct backlight_ops asusbl_ops = {
54618 +static const struct backlight_ops asusbl_ops = {
54619 .get_brightness = read_brightness,
54620 .update_status = update_bl_status,
54621 };
54622 diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
54623 index d66c07a..a4abaac 100644
54624 --- a/drivers/platform/x86/asus_acpi.c
54625 +++ b/drivers/platform/x86/asus_acpi.c
54626 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
54627 return 0;
54628 }
54629
54630 -static struct backlight_ops asus_backlight_data = {
54631 +static const struct backlight_ops asus_backlight_data = {
54632 .get_brightness = read_brightness,
54633 .update_status = set_brightness_status,
54634 };
54635 diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
54636 index 11003bb..550ff1b 100644
54637 --- a/drivers/platform/x86/compal-laptop.c
54638 +++ b/drivers/platform/x86/compal-laptop.c
54639 @@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
54640 return set_lcd_level(b->props.brightness);
54641 }
54642
54643 -static struct backlight_ops compalbl_ops = {
54644 +static const struct backlight_ops compalbl_ops = {
54645 .get_brightness = bl_get_brightness,
54646 .update_status = bl_update_status,
54647 };
54648 diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
54649 index 07a74da..9dc99fa 100644
54650 --- a/drivers/platform/x86/dell-laptop.c
54651 +++ b/drivers/platform/x86/dell-laptop.c
54652 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
54653 return buffer.output[1];
54654 }
54655
54656 -static struct backlight_ops dell_ops = {
54657 +static const struct backlight_ops dell_ops = {
54658 .get_brightness = dell_get_intensity,
54659 .update_status = dell_send_intensity,
54660 };
54661 diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
54662 index c533b1c..5c81f22 100644
54663 --- a/drivers/platform/x86/eeepc-laptop.c
54664 +++ b/drivers/platform/x86/eeepc-laptop.c
54665 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
54666 */
54667 static int read_brightness(struct backlight_device *bd);
54668 static int update_bl_status(struct backlight_device *bd);
54669 -static struct backlight_ops eeepcbl_ops = {
54670 +static const struct backlight_ops eeepcbl_ops = {
54671 .get_brightness = read_brightness,
54672 .update_status = update_bl_status,
54673 };
54674 diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
54675 index bcd4ba8..a249b35 100644
54676 --- a/drivers/platform/x86/fujitsu-laptop.c
54677 +++ b/drivers/platform/x86/fujitsu-laptop.c
54678 @@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
54679 return ret;
54680 }
54681
54682 -static struct backlight_ops fujitsubl_ops = {
54683 +static const struct backlight_ops fujitsubl_ops = {
54684 .get_brightness = bl_get_brightness,
54685 .update_status = bl_update_status,
54686 };
54687 diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
54688 index 759763d..1093ba2 100644
54689 --- a/drivers/platform/x86/msi-laptop.c
54690 +++ b/drivers/platform/x86/msi-laptop.c
54691 @@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
54692 return set_lcd_level(b->props.brightness);
54693 }
54694
54695 -static struct backlight_ops msibl_ops = {
54696 +static const struct backlight_ops msibl_ops = {
54697 .get_brightness = bl_get_brightness,
54698 .update_status = bl_update_status,
54699 };
54700 diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
54701 index fe7cf01..9012d8d 100644
54702 --- a/drivers/platform/x86/panasonic-laptop.c
54703 +++ b/drivers/platform/x86/panasonic-laptop.c
54704 @@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
54705 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
54706 }
54707
54708 -static struct backlight_ops pcc_backlight_ops = {
54709 +static const struct backlight_ops pcc_backlight_ops = {
54710 .get_brightness = bl_get,
54711 .update_status = bl_set_status,
54712 };
54713 diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
54714 index a2a742c..b37e25e 100644
54715 --- a/drivers/platform/x86/sony-laptop.c
54716 +++ b/drivers/platform/x86/sony-laptop.c
54717 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
54718 }
54719
54720 static struct backlight_device *sony_backlight_device;
54721 -static struct backlight_ops sony_backlight_ops = {
54722 +static const struct backlight_ops sony_backlight_ops = {
54723 .update_status = sony_backlight_update_status,
54724 .get_brightness = sony_backlight_get_brightness,
54725 };
54726 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
54727 index 68271ae..5e8fb10 100644
54728 --- a/drivers/platform/x86/thinkpad_acpi.c
54729 +++ b/drivers/platform/x86/thinkpad_acpi.c
54730 @@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
54731 return 0;
54732 }
54733
54734 -void static hotkey_mask_warn_incomplete_mask(void)
54735 +static void hotkey_mask_warn_incomplete_mask(void)
54736 {
54737 /* log only what the user can fix... */
54738 const u32 wantedmask = hotkey_driver_mask &
54739 @@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
54740 BACKLIGHT_UPDATE_HOTKEY);
54741 }
54742
54743 -static struct backlight_ops ibm_backlight_data = {
54744 +static const struct backlight_ops ibm_backlight_data = {
54745 .get_brightness = brightness_get,
54746 .update_status = brightness_update_status,
54747 };
54748 diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
54749 index 51c0a8b..0786629 100644
54750 --- a/drivers/platform/x86/toshiba_acpi.c
54751 +++ b/drivers/platform/x86/toshiba_acpi.c
54752 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
54753 return AE_OK;
54754 }
54755
54756 -static struct backlight_ops toshiba_backlight_data = {
54757 +static const struct backlight_ops toshiba_backlight_data = {
54758 .get_brightness = get_lcd,
54759 .update_status = set_lcd_status,
54760 };
54761 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
54762 index fc83783c..cf370d7 100644
54763 --- a/drivers/pnp/pnpbios/bioscalls.c
54764 +++ b/drivers/pnp/pnpbios/bioscalls.c
54765 @@ -60,7 +60,7 @@ do { \
54766 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
54767 } while(0)
54768
54769 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
54770 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
54771 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
54772
54773 /*
54774 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
54775
54776 cpu = get_cpu();
54777 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
54778 +
54779 + pax_open_kernel();
54780 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
54781 + pax_close_kernel();
54782
54783 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
54784 spin_lock_irqsave(&pnp_bios_lock, flags);
54785 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
54786 :"memory");
54787 spin_unlock_irqrestore(&pnp_bios_lock, flags);
54788
54789 + pax_open_kernel();
54790 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
54791 + pax_close_kernel();
54792 +
54793 put_cpu();
54794
54795 /* If we get here and this is set then the PnP BIOS faulted on us. */
54796 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
54797 return status;
54798 }
54799
54800 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
54801 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
54802 {
54803 int i;
54804
54805 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
54806 pnp_bios_callpoint.offset = header->fields.pm16offset;
54807 pnp_bios_callpoint.segment = PNP_CS16;
54808
54809 + pax_open_kernel();
54810 +
54811 for_each_possible_cpu(i) {
54812 struct desc_struct *gdt = get_cpu_gdt_table(i);
54813 if (!gdt)
54814 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
54815 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
54816 (unsigned long)__va(header->fields.pm16dseg));
54817 }
54818 +
54819 + pax_close_kernel();
54820 }
54821 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
54822 index ba97654..66b99d4 100644
54823 --- a/drivers/pnp/resource.c
54824 +++ b/drivers/pnp/resource.c
54825 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
54826 return 1;
54827
54828 /* check if the resource is valid */
54829 - if (*irq < 0 || *irq > 15)
54830 + if (*irq > 15)
54831 return 0;
54832
54833 /* check if the resource is reserved */
54834 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
54835 return 1;
54836
54837 /* check if the resource is valid */
54838 - if (*dma < 0 || *dma == 4 || *dma > 7)
54839 + if (*dma == 4 || *dma > 7)
54840 return 0;
54841
54842 /* check if the resource is reserved */
54843 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
54844 index 62bb981..24a2dc9 100644
54845 --- a/drivers/power/bq27x00_battery.c
54846 +++ b/drivers/power/bq27x00_battery.c
54847 @@ -44,7 +44,7 @@ struct bq27x00_device_info;
54848 struct bq27x00_access_methods {
54849 int (*read)(u8 reg, int *rt_value, int b_single,
54850 struct bq27x00_device_info *di);
54851 -};
54852 +} __no_const;
54853
54854 struct bq27x00_device_info {
54855 struct device *dev;
54856 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
54857 index 62227cd..b5b538b 100644
54858 --- a/drivers/rtc/rtc-dev.c
54859 +++ b/drivers/rtc/rtc-dev.c
54860 @@ -14,6 +14,7 @@
54861 #include <linux/module.h>
54862 #include <linux/rtc.h>
54863 #include <linux/sched.h>
54864 +#include <linux/grsecurity.h>
54865 #include "rtc-core.h"
54866
54867 static dev_t rtc_devt;
54868 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
54869 if (copy_from_user(&tm, uarg, sizeof(tm)))
54870 return -EFAULT;
54871
54872 + gr_log_timechange();
54873 +
54874 return rtc_set_time(rtc, &tm);
54875
54876 case RTC_PIE_ON:
54877 diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
54878 index 968e3c7..fbc637a 100644
54879 --- a/drivers/s390/cio/qdio_perf.c
54880 +++ b/drivers/s390/cio/qdio_perf.c
54881 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
54882 static int qdio_perf_proc_show(struct seq_file *m, void *v)
54883 {
54884 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
54885 - (long)atomic_long_read(&perf_stats.qdio_int));
54886 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
54887 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
54888 - (long)atomic_long_read(&perf_stats.pci_int));
54889 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
54890 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
54891 - (long)atomic_long_read(&perf_stats.thin_int));
54892 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
54893 seq_printf(m, "\n");
54894 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
54895 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
54896 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
54897 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
54898 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
54899 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
54900 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
54901 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
54902 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
54903 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
54904 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
54905 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
54906 - (long)atomic_long_read(&perf_stats.thinint_inbound),
54907 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
54908 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
54909 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
54910 seq_printf(m, "\n");
54911 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
54912 - (long)atomic_long_read(&perf_stats.siga_in));
54913 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
54914 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
54915 - (long)atomic_long_read(&perf_stats.siga_out));
54916 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
54917 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
54918 - (long)atomic_long_read(&perf_stats.siga_sync));
54919 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
54920 seq_printf(m, "\n");
54921 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
54922 - (long)atomic_long_read(&perf_stats.inbound_handler));
54923 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
54924 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
54925 - (long)atomic_long_read(&perf_stats.outbound_handler));
54926 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
54927 seq_printf(m, "\n");
54928 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
54929 - (long)atomic_long_read(&perf_stats.fast_requeue));
54930 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
54931 seq_printf(m, "Number of outbound target full condition\t: %li\n",
54932 - (long)atomic_long_read(&perf_stats.outbound_target_full));
54933 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
54934 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
54935 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
54936 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
54937 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
54938 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
54939 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
54940 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
54941 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
54942 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
54943 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
54944 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
54945 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
54946 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
54947 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
54948 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
54949 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
54950 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
54951 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
54952 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
54953 seq_printf(m, "\n");
54954 return 0;
54955 }
54956 diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
54957 index ff4504c..b3604c3 100644
54958 --- a/drivers/s390/cio/qdio_perf.h
54959 +++ b/drivers/s390/cio/qdio_perf.h
54960 @@ -13,46 +13,46 @@
54961
54962 struct qdio_perf_stats {
54963 /* interrupt handler calls */
54964 - atomic_long_t qdio_int;
54965 - atomic_long_t pci_int;
54966 - atomic_long_t thin_int;
54967 + atomic_long_unchecked_t qdio_int;
54968 + atomic_long_unchecked_t pci_int;
54969 + atomic_long_unchecked_t thin_int;
54970
54971 /* tasklet runs */
54972 - atomic_long_t tasklet_inbound;
54973 - atomic_long_t tasklet_outbound;
54974 - atomic_long_t tasklet_thinint;
54975 - atomic_long_t tasklet_thinint_loop;
54976 - atomic_long_t thinint_inbound;
54977 - atomic_long_t thinint_inbound_loop;
54978 - atomic_long_t thinint_inbound_loop2;
54979 + atomic_long_unchecked_t tasklet_inbound;
54980 + atomic_long_unchecked_t tasklet_outbound;
54981 + atomic_long_unchecked_t tasklet_thinint;
54982 + atomic_long_unchecked_t tasklet_thinint_loop;
54983 + atomic_long_unchecked_t thinint_inbound;
54984 + atomic_long_unchecked_t thinint_inbound_loop;
54985 + atomic_long_unchecked_t thinint_inbound_loop2;
54986
54987 /* signal adapter calls */
54988 - atomic_long_t siga_out;
54989 - atomic_long_t siga_in;
54990 - atomic_long_t siga_sync;
54991 + atomic_long_unchecked_t siga_out;
54992 + atomic_long_unchecked_t siga_in;
54993 + atomic_long_unchecked_t siga_sync;
54994
54995 /* misc */
54996 - atomic_long_t inbound_handler;
54997 - atomic_long_t outbound_handler;
54998 - atomic_long_t fast_requeue;
54999 - atomic_long_t outbound_target_full;
55000 + atomic_long_unchecked_t inbound_handler;
55001 + atomic_long_unchecked_t outbound_handler;
55002 + atomic_long_unchecked_t fast_requeue;
55003 + atomic_long_unchecked_t outbound_target_full;
55004
55005 /* for debugging */
55006 - atomic_long_t debug_tl_out_timer;
55007 - atomic_long_t debug_stop_polling;
55008 - atomic_long_t debug_eqbs_all;
55009 - atomic_long_t debug_eqbs_incomplete;
55010 - atomic_long_t debug_sqbs_all;
55011 - atomic_long_t debug_sqbs_incomplete;
55012 + atomic_long_unchecked_t debug_tl_out_timer;
55013 + atomic_long_unchecked_t debug_stop_polling;
55014 + atomic_long_unchecked_t debug_eqbs_all;
55015 + atomic_long_unchecked_t debug_eqbs_incomplete;
55016 + atomic_long_unchecked_t debug_sqbs_all;
55017 + atomic_long_unchecked_t debug_sqbs_incomplete;
55018 };
55019
55020 extern struct qdio_perf_stats perf_stats;
55021 extern int qdio_performance_stats;
55022
55023 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
55024 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
55025 {
55026 if (qdio_performance_stats)
55027 - atomic_long_inc(count);
55028 + atomic_long_inc_unchecked(count);
55029 }
55030
55031 int qdio_setup_perf_stats(void);
55032 diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
55033 new file mode 100644
55034 index 0000000..7d18a18
55035 --- /dev/null
55036 +++ b/drivers/scsi/3w-sas.c
55037 @@ -0,0 +1,1933 @@
55038 +/*
55039 + 3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
55040 +
55041 + Written By: Adam Radford <linuxraid@lsi.com>
55042 +
55043 + Copyright (C) 2009 LSI Corporation.
55044 +
55045 + This program is free software; you can redistribute it and/or modify
55046 + it under the terms of the GNU General Public License as published by
55047 + the Free Software Foundation; version 2 of the License.
55048 +
55049 + This program is distributed in the hope that it will be useful,
55050 + but WITHOUT ANY WARRANTY; without even the implied warranty of
55051 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
55052 + GNU General Public License for more details.
55053 +
55054 + NO WARRANTY
55055 + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
55056 + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
55057 + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
55058 + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
55059 + solely responsible for determining the appropriateness of using and
55060 + distributing the Program and assumes all risks associated with its
55061 + exercise of rights under this Agreement, including but not limited to
55062 + the risks and costs of program errors, damage to or loss of data,
55063 + programs or equipment, and unavailability or interruption of operations.
55064 +
55065 + DISCLAIMER OF LIABILITY
55066 + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
55067 + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55068 + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
55069 + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
55070 + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
55071 + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
55072 + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
55073 +
55074 + You should have received a copy of the GNU General Public License
55075 + along with this program; if not, write to the Free Software
55076 + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
55077 +
55078 + Controllers supported by this driver:
55079 +
55080 + LSI 3ware 9750 6Gb/s SAS/SATA-RAID
55081 +
55082 + Bugs/Comments/Suggestions should be mailed to:
55083 + linuxraid@lsi.com
55084 +
55085 + For more information, goto:
55086 + http://www.lsi.com
55087 +
55088 + History
55089 + -------
55090 + 3.26.00.000 - Initial driver release.
55091 +*/
55092 +
55093 +#include <linux/module.h>
55094 +#include <linux/reboot.h>
55095 +#include <linux/spinlock.h>
55096 +#include <linux/interrupt.h>
55097 +#include <linux/moduleparam.h>
55098 +#include <linux/errno.h>
55099 +#include <linux/types.h>
55100 +#include <linux/delay.h>
55101 +#include <linux/pci.h>
55102 +#include <linux/time.h>
55103 +#include <linux/mutex.h>
55104 +#include <linux/smp_lock.h>
55105 +#include <asm/io.h>
55106 +#include <asm/irq.h>
55107 +#include <asm/uaccess.h>
55108 +#include <scsi/scsi.h>
55109 +#include <scsi/scsi_host.h>
55110 +#include <scsi/scsi_tcq.h>
55111 +#include <scsi/scsi_cmnd.h>
55112 +#include "3w-sas.h"
55113 +
55114 +/* Globals */
55115 +#define TW_DRIVER_VERSION "3.26.00.028-2.6.32RH"
55116 +static TW_Device_Extension *twl_device_extension_list[TW_MAX_SLOT];
55117 +static unsigned int twl_device_extension_count;
55118 +static int twl_major = -1;
55119 +extern struct timezone sys_tz;
55120 +
55121 +/* Module parameters */
55122 +MODULE_AUTHOR ("LSI");
55123 +MODULE_DESCRIPTION ("LSI 3ware SAS/SATA-RAID Linux Driver");
55124 +MODULE_LICENSE("GPL");
55125 +MODULE_VERSION(TW_DRIVER_VERSION);
55126 +
55127 +static int use_msi = 0;
55128 +module_param(use_msi, int, S_IRUGO);
55129 +MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
55130 +
55131 +/* Function prototypes */
55132 +static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset);
55133 +
55134 +/* Functions */
55135 +
55136 +/* This function returns AENs through sysfs */
55137 +static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj,
55138 + struct bin_attribute *bin_attr,
55139 + char *outbuf, loff_t offset, size_t count)
55140 +{
55141 + struct device *dev = container_of(kobj, struct device, kobj);
55142 + struct Scsi_Host *shost = class_to_shost(dev);
55143 + TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata;
55144 + unsigned long flags = 0;
55145 + ssize_t ret;
55146 +
55147 + if (!capable(CAP_SYS_ADMIN))
55148 + return -EACCES;
55149 +
55150 + spin_lock_irqsave(tw_dev->host->host_lock, flags);
55151 + ret = memory_read_from_buffer(outbuf, count, &offset, tw_dev->event_queue[0], sizeof(TW_Event) * TW_Q_LENGTH);
55152 + spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55153 +
55154 + return ret;
55155 +} /* End twl_sysfs_aen_read() */
55156 +
55157 +/* aen_read sysfs attribute initializer */
55158 +static struct bin_attribute twl_sysfs_aen_read_attr = {
55159 + .attr = {
55160 + .name = "3ware_aen_read",
55161 + .mode = S_IRUSR,
55162 + },
55163 + .size = 0,
55164 + .read = twl_sysfs_aen_read
55165 +};
55166 +
55167 +/* This function returns driver compatibility info through sysfs */
55168 +static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj,
55169 + struct bin_attribute *bin_attr,
55170 + char *outbuf, loff_t offset, size_t count)
55171 +{
55172 + struct device *dev = container_of(kobj, struct device, kobj);
55173 + struct Scsi_Host *shost = class_to_shost(dev);
55174 + TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata;
55175 + unsigned long flags = 0;
55176 + ssize_t ret;
55177 +
55178 + if (!capable(CAP_SYS_ADMIN))
55179 + return -EACCES;
55180 +
55181 + spin_lock_irqsave(tw_dev->host->host_lock, flags);
55182 + ret = memory_read_from_buffer(outbuf, count, &offset, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
55183 + spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55184 +
55185 + return ret;
55186 +} /* End twl_sysfs_compat_info() */
55187 +
55188 +/* compat_info sysfs attribute initializer */
55189 +static struct bin_attribute twl_sysfs_compat_info_attr = {
55190 + .attr = {
55191 + .name = "3ware_compat_info",
55192 + .mode = S_IRUSR,
55193 + },
55194 + .size = 0,
55195 + .read = twl_sysfs_compat_info
55196 +};
55197 +
55198 +/* Show some statistics about the card */
55199 +static ssize_t twl_show_stats(struct device *dev,
55200 + struct device_attribute *attr, char *buf)
55201 +{
55202 + struct Scsi_Host *host = class_to_shost(dev);
55203 + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
55204 + unsigned long flags = 0;
55205 + ssize_t len;
55206 +
55207 + spin_lock_irqsave(tw_dev->host->host_lock, flags);
55208 + len = snprintf(buf, PAGE_SIZE, "3w-sas Driver version: %s\n"
55209 + "Current commands posted: %4d\n"
55210 + "Max commands posted: %4d\n"
55211 + "Last sgl length: %4d\n"
55212 + "Max sgl length: %4d\n"
55213 + "Last sector count: %4d\n"
55214 + "Max sector count: %4d\n"
55215 + "SCSI Host Resets: %4d\n"
55216 + "AEN's: %4d\n",
55217 + TW_DRIVER_VERSION,
55218 + tw_dev->posted_request_count,
55219 + tw_dev->max_posted_request_count,
55220 + tw_dev->sgl_entries,
55221 + tw_dev->max_sgl_entries,
55222 + tw_dev->sector_count,
55223 + tw_dev->max_sector_count,
55224 + tw_dev->num_resets,
55225 + tw_dev->aen_count);
55226 + spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55227 + return len;
55228 +} /* End twl_show_stats() */
55229 +
55230 +/* This function will set a devices queue depth */
55231 +static int twl_change_queue_depth(struct scsi_device *sdev, int queue_depth,
55232 + int reason)
55233 +{
55234 + if (reason != SCSI_QDEPTH_DEFAULT)
55235 + return -EOPNOTSUPP;
55236 +
55237 + if (queue_depth > TW_Q_LENGTH-2)
55238 + queue_depth = TW_Q_LENGTH-2;
55239 + scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
55240 + return queue_depth;
55241 +} /* End twl_change_queue_depth() */
55242 +
55243 +/* stats sysfs attribute initializer */
55244 +static struct device_attribute twl_host_stats_attr = {
55245 + .attr = {
55246 + .name = "3ware_stats",
55247 + .mode = S_IRUGO,
55248 + },
55249 + .show = twl_show_stats
55250 +};
55251 +
55252 +/* Host attributes initializer */
55253 +static struct device_attribute *twl_host_attrs[] = {
55254 + &twl_host_stats_attr,
55255 + NULL,
55256 +};
55257 +
55258 +/* This function will look up an AEN severity string */
55259 +static char *twl_aen_severity_lookup(unsigned char severity_code)
55260 +{
55261 + char *retval = NULL;
55262 +
55263 + if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
55264 + (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
55265 + goto out;
55266 +
55267 + retval = twl_aen_severity_table[severity_code];
55268 +out:
55269 + return retval;
55270 +} /* End twl_aen_severity_lookup() */
55271 +
55272 +/* This function will queue an event */
55273 +static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
55274 +{
55275 + u32 local_time;
55276 + struct timeval time;
55277 + TW_Event *event;
55278 + unsigned short aen;
55279 + char host[16];
55280 + char *error_str;
55281 +
55282 + tw_dev->aen_count++;
55283 +
55284 + /* Fill out event info */
55285 + event = tw_dev->event_queue[tw_dev->error_index];
55286 +
55287 + host[0] = '\0';
55288 + if (tw_dev->host)
55289 + sprintf(host, " scsi%d:", tw_dev->host->host_no);
55290 +
55291 + aen = le16_to_cpu(header->status_block.error);
55292 + memset(event, 0, sizeof(TW_Event));
55293 +
55294 + event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
55295 + do_gettimeofday(&time);
55296 + local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
55297 + event->time_stamp_sec = local_time;
55298 + event->aen_code = aen;
55299 + event->retrieved = TW_AEN_NOT_RETRIEVED;
55300 + event->sequence_id = tw_dev->error_sequence_id;
55301 + tw_dev->error_sequence_id++;
55302 +
55303 + /* Check for embedded error string */
55304 + error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
55305 +
55306 + header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
55307 + event->parameter_len = strlen(header->err_specific_desc);
55308 + memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + 1 + strlen(error_str));
55309 + if (event->severity != TW_AEN_SEVERITY_DEBUG)
55310 + printk(KERN_WARNING "3w-sas:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
55311 + host,
55312 + twl_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
55313 + TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen, error_str,
55314 + header->err_specific_desc);
55315 + else
55316 + tw_dev->aen_count--;
55317 +
55318 + tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
55319 +} /* End twl_aen_queue_event() */
55320 +
55321 +/* This function will attempt to post a command packet to the board */
55322 +static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
55323 +{
55324 + dma_addr_t command_que_value;
55325 +
55326 + command_que_value = tw_dev->command_packet_phys[request_id];
55327 + command_que_value += TW_COMMAND_OFFSET;
55328 +
55329 + /* First write upper 4 bytes */
55330 + writel((u32)((u64)command_que_value >> 32), TWL_HIBQPH_REG_ADDR(tw_dev));
55331 + /* Then the lower 4 bytes */
55332 + writel((u32)(command_que_value | TWL_PULL_MODE), TWL_HIBQPL_REG_ADDR(tw_dev));
55333 +
55334 + tw_dev->state[request_id] = TW_S_POSTED;
55335 + tw_dev->posted_request_count++;
55336 + if (tw_dev->posted_request_count > tw_dev->max_posted_request_count)
55337 + tw_dev->max_posted_request_count = tw_dev->posted_request_count;
55338 +
55339 + return 0;
55340 +} /* End twl_post_command_packet() */
55341 +
55342 +/* This function will perform a pci-dma mapping for a scatter gather list */
55343 +static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
55344 +{
55345 + int use_sg;
55346 + struct scsi_cmnd *cmd = tw_dev->srb[request_id];
55347 +
55348 + use_sg = scsi_dma_map(cmd);
55349 + if (!use_sg)
55350 + return 0;
55351 + else if (use_sg < 0) {
55352 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
55353 + return 0;
55354 + }
55355 +
55356 + cmd->SCp.phase = TW_PHASE_SGLIST;
55357 + cmd->SCp.have_data_in = use_sg;
55358 +
55359 + return use_sg;
55360 +} /* End twl_map_scsi_sg_data() */
55361 +
55362 +/* This function hands scsi cdb's to the firmware */
55363 +static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
55364 +{
55365 + TW_Command_Full *full_command_packet;
55366 + TW_Command_Apache *command_packet;
55367 + int i, sg_count;
55368 + struct scsi_cmnd *srb = NULL;
55369 + struct scatterlist *sglist = NULL, *sg;
55370 + int retval = 1;
55371 +
55372 + if (tw_dev->srb[request_id]) {
55373 + srb = tw_dev->srb[request_id];
55374 + if (scsi_sglist(srb))
55375 + sglist = scsi_sglist(srb);
55376 + }
55377 +
55378 + /* Initialize command packet */
55379 + full_command_packet = tw_dev->command_packet_virt[request_id];
55380 + full_command_packet->header.header_desc.size_header = 128;
55381 + full_command_packet->header.status_block.error = 0;
55382 + full_command_packet->header.status_block.severity__reserved = 0;
55383 +
55384 + command_packet = &full_command_packet->command.newcommand;
55385 + command_packet->status = 0;
55386 + command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
55387 +
55388 + /* We forced 16 byte cdb use earlier */
55389 + if (!cdb)
55390 + memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
55391 + else
55392 + memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
55393 +
55394 + if (srb) {
55395 + command_packet->unit = srb->device->id;
55396 + command_packet->request_id__lunl =
55397 + cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
55398 + } else {
55399 + command_packet->request_id__lunl =
55400 + cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
55401 + command_packet->unit = 0;
55402 + }
55403 +
55404 + command_packet->sgl_offset = 16;
55405 +
55406 + if (!sglistarg) {
55407 + /* Map sglist from scsi layer to cmd packet */
55408 + if (scsi_sg_count(srb)) {
55409 + sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
55410 + if (sg_count == 0)
55411 + goto out;
55412 +
55413 + scsi_for_each_sg(srb, sg, sg_count, i) {
55414 + command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
55415 + command_packet->sg_list[i].length = TW_CPU_TO_SGL(sg_dma_len(sg));
55416 + }
55417 + command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
55418 + }
55419 + } else {
55420 + /* Internal cdb post */
55421 + for (i = 0; i < use_sg; i++) {
55422 + command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
55423 + command_packet->sg_list[i].length = TW_CPU_TO_SGL(sglistarg[i].length);
55424 + }
55425 + command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
55426 + }
55427 +
55428 + /* Update some stats */
55429 + if (srb) {
55430 + tw_dev->sector_count = scsi_bufflen(srb) / 512;
55431 + if (tw_dev->sector_count > tw_dev->max_sector_count)
55432 + tw_dev->max_sector_count = tw_dev->sector_count;
55433 + tw_dev->sgl_entries = scsi_sg_count(srb);
55434 + if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
55435 + tw_dev->max_sgl_entries = tw_dev->sgl_entries;
55436 + }
55437 +
55438 + /* Now post the command to the board */
55439 + retval = twl_post_command_packet(tw_dev, request_id);
55440 +
55441 +out:
55442 + return retval;
55443 +} /* End twl_scsiop_execute_scsi() */
55444 +
55445 +/* This function will read the aen queue from the isr */
55446 +static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
55447 +{
55448 + char cdb[TW_MAX_CDB_LEN];
55449 + TW_SG_Entry_ISO sglist[1];
55450 + TW_Command_Full *full_command_packet;
55451 + int retval = 1;
55452 +
55453 + full_command_packet = tw_dev->command_packet_virt[request_id];
55454 + memset(full_command_packet, 0, sizeof(TW_Command_Full));
55455 +
55456 + /* Initialize cdb */
55457 + memset(&cdb, 0, TW_MAX_CDB_LEN);
55458 + cdb[0] = REQUEST_SENSE; /* opcode */
55459 + cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
55460 +
55461 + /* Initialize sglist */
55462 + memset(&sglist, 0, sizeof(TW_SG_Entry_ISO));
55463 + sglist[0].length = TW_SECTOR_SIZE;
55464 + sglist[0].address = tw_dev->generic_buffer_phys[request_id];
55465 +
55466 + /* Mark internal command */
55467 + tw_dev->srb[request_id] = NULL;
55468 +
55469 + /* Now post the command packet */
55470 + if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
55471 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Post failed while reading AEN queue");
55472 + goto out;
55473 + }
55474 + retval = 0;
55475 +out:
55476 + return retval;
55477 +} /* End twl_aen_read_queue() */
55478 +
55479 +/* This function will sync firmware time with the host time */
55480 +static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
55481 +{
55482 + u32 schedulertime;
55483 + struct timeval utc;
55484 + TW_Command_Full *full_command_packet;
55485 + TW_Command *command_packet;
55486 + TW_Param_Apache *param;
55487 + u32 local_time;
55488 +
55489 + /* Fill out the command packet */
55490 + full_command_packet = tw_dev->command_packet_virt[request_id];
55491 + memset(full_command_packet, 0, sizeof(TW_Command_Full));
55492 + command_packet = &full_command_packet->command.oldcommand;
55493 + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
55494 + command_packet->request_id = request_id;
55495 + command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
55496 + command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
55497 + command_packet->size = TW_COMMAND_SIZE;
55498 + command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
55499 +
55500 + /* Setup the param */
55501 + param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
55502 + memset(param, 0, TW_SECTOR_SIZE);
55503 + param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
55504 + param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
55505 + param->parameter_size_bytes = cpu_to_le16(4);
55506 +
55507 + /* Convert system time in UTC to local time seconds since last
55508 + Sunday 12:00AM */
55509 + do_gettimeofday(&utc);
55510 + local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
55511 + schedulertime = local_time - (3 * 86400);
55512 + schedulertime = cpu_to_le32(schedulertime % 604800);
55513 +
55514 + memcpy(param->data, &schedulertime, sizeof(u32));
55515 +
55516 + /* Mark internal command */
55517 + tw_dev->srb[request_id] = NULL;
55518 +
55519 + /* Now post the command */
55520 + twl_post_command_packet(tw_dev, request_id);
55521 +} /* End twl_aen_sync_time() */
55522 +
55523 +/* This function will assign an available request id */
55524 +static void twl_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
55525 +{
55526 + *request_id = tw_dev->free_queue[tw_dev->free_head];
55527 + tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
55528 + tw_dev->state[*request_id] = TW_S_STARTED;
55529 +} /* End twl_get_request_id() */
55530 +
55531 +/* This function will free a request id */
55532 +static void twl_free_request_id(TW_Device_Extension *tw_dev, int request_id)
55533 +{
55534 + tw_dev->free_queue[tw_dev->free_tail] = request_id;
55535 + tw_dev->state[request_id] = TW_S_FINISHED;
55536 + tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
55537 +} /* End twl_free_request_id() */
55538 +
55539 +/* This function will complete an aen request from the isr */
55540 +static int twl_aen_complete(TW_Device_Extension *tw_dev, int request_id)
55541 +{
55542 + TW_Command_Full *full_command_packet;
55543 + TW_Command *command_packet;
55544 + TW_Command_Apache_Header *header;
55545 + unsigned short aen;
55546 + int retval = 1;
55547 +
55548 + header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
55549 + tw_dev->posted_request_count--;
55550 + aen = le16_to_cpu(header->status_block.error);
55551 + full_command_packet = tw_dev->command_packet_virt[request_id];
55552 + command_packet = &full_command_packet->command.oldcommand;
55553 +
55554 + /* First check for internal completion of set param for time sync */
55555 + if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
55556 + /* Keep reading the queue in case there are more aen's */
55557 + if (twl_aen_read_queue(tw_dev, request_id))
55558 + goto out2;
55559 + else {
55560 + retval = 0;
55561 + goto out;
55562 + }
55563 + }
55564 +
55565 + switch (aen) {
55566 + case TW_AEN_QUEUE_EMPTY:
55567 + /* Quit reading the queue if this is the last one */
55568 + break;
55569 + case TW_AEN_SYNC_TIME_WITH_HOST:
55570 + twl_aen_sync_time(tw_dev, request_id);
55571 + retval = 0;
55572 + goto out;
55573 + default:
55574 + twl_aen_queue_event(tw_dev, header);
55575 +
55576 + /* If there are more aen's, keep reading the queue */
55577 + if (twl_aen_read_queue(tw_dev, request_id))
55578 + goto out2;
55579 + else {
55580 + retval = 0;
55581 + goto out;
55582 + }
55583 + }
55584 + retval = 0;
55585 +out2:
55586 + tw_dev->state[request_id] = TW_S_COMPLETED;
55587 + twl_free_request_id(tw_dev, request_id);
55588 + clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
55589 +out:
55590 + return retval;
55591 +} /* End twl_aen_complete() */
55592 +
55593 +/* This function will poll for a response */
55594 +static int twl_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
55595 +{
55596 + unsigned long before;
55597 + dma_addr_t mfa;
55598 + u32 regh, regl;
55599 + u32 response;
55600 + int retval = 1;
55601 + int found = 0;
55602 +
55603 + before = jiffies;
55604 +
55605 + while (!found) {
55606 + if (sizeof(dma_addr_t) > 4) {
55607 + regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
55608 + regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
55609 + mfa = ((u64)regh << 32) | regl;
55610 + } else
55611 + mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
55612 +
55613 + response = (u32)mfa;
55614 +
55615 + if (TW_RESID_OUT(response) == request_id)
55616 + found = 1;
55617 +
55618 + if (time_after(jiffies, before + HZ * seconds))
55619 + goto out;
55620 +
55621 + msleep(50);
55622 + }
55623 + retval = 0;
55624 +out:
55625 + return retval;
55626 +} /* End twl_poll_response() */
55627 +
55628 +/* This function will drain the aen queue */
55629 +static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
55630 +{
55631 + int request_id = 0;
55632 + char cdb[TW_MAX_CDB_LEN];
55633 + TW_SG_Entry_ISO sglist[1];
55634 + int finished = 0, count = 0;
55635 + TW_Command_Full *full_command_packet;
55636 + TW_Command_Apache_Header *header;
55637 + unsigned short aen;
55638 + int first_reset = 0, queue = 0, retval = 1;
55639 +
55640 + if (no_check_reset)
55641 + first_reset = 0;
55642 + else
55643 + first_reset = 1;
55644 +
55645 + full_command_packet = tw_dev->command_packet_virt[request_id];
55646 + memset(full_command_packet, 0, sizeof(TW_Command_Full));
55647 +
55648 + /* Initialize cdb */
55649 + memset(&cdb, 0, TW_MAX_CDB_LEN);
55650 + cdb[0] = REQUEST_SENSE; /* opcode */
55651 + cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
55652 +
55653 + /* Initialize sglist */
55654 + memset(&sglist, 0, sizeof(TW_SG_Entry_ISO));
55655 + sglist[0].length = TW_SECTOR_SIZE;
55656 + sglist[0].address = tw_dev->generic_buffer_phys[request_id];
55657 +
55658 + /* Mark internal command */
55659 + tw_dev->srb[request_id] = NULL;
55660 +
55661 + do {
55662 + /* Send command to the board */
55663 + if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
55664 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "Error posting request sense");
55665 + goto out;
55666 + }
55667 +
55668 + /* Now poll for completion */
55669 + if (twl_poll_response(tw_dev, request_id, 30)) {
55670 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "No valid response while draining AEN queue");
55671 + tw_dev->posted_request_count--;
55672 + goto out;
55673 + }
55674 +
55675 + tw_dev->posted_request_count--;
55676 + header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
55677 + aen = le16_to_cpu(header->status_block.error);
55678 + queue = 0;
55679 + count++;
55680 +
55681 + switch (aen) {
55682 + case TW_AEN_QUEUE_EMPTY:
55683 + if (first_reset != 1)
55684 + goto out;
55685 + else
55686 + finished = 1;
55687 + break;
55688 + case TW_AEN_SOFT_RESET:
55689 + if (first_reset == 0)
55690 + first_reset = 1;
55691 + else
55692 + queue = 1;
55693 + break;
55694 + case TW_AEN_SYNC_TIME_WITH_HOST:
55695 + break;
55696 + default:
55697 + queue = 1;
55698 + }
55699 +
55700 + /* Now queue an event info */
55701 + if (queue)
55702 + twl_aen_queue_event(tw_dev, header);
55703 + } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
55704 +
55705 + if (count == TW_MAX_AEN_DRAIN)
55706 + goto out;
55707 +
55708 + retval = 0;
55709 +out:
55710 + tw_dev->state[request_id] = TW_S_INITIAL;
55711 + return retval;
55712 +} /* End twl_aen_drain_queue() */
55713 +
55714 +/* This function will allocate memory and check if it is correctly aligned */
55715 +static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
55716 +{
55717 + int i;
55718 + dma_addr_t dma_handle;
55719 + unsigned long *cpu_addr;
55720 + int retval = 1;
55721 +
55722 + cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
55723 + if (!cpu_addr) {
55724 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
55725 + goto out;
55726 + }
55727 +
55728 + memset(cpu_addr, 0, size*TW_Q_LENGTH);
55729 +
55730 + for (i = 0; i < TW_Q_LENGTH; i++) {
55731 + switch(which) {
55732 + case 0:
55733 + tw_dev->command_packet_phys[i] = dma_handle+(i*size);
55734 + tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
55735 + break;
55736 + case 1:
55737 + tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
55738 + tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
55739 + break;
55740 + case 2:
55741 + tw_dev->sense_buffer_phys[i] = dma_handle+(i*size);
55742 + tw_dev->sense_buffer_virt[i] = (TW_Command_Apache_Header *)((unsigned char *)cpu_addr + (i*size));
55743 + break;
55744 + }
55745 + }
55746 + retval = 0;
55747 +out:
55748 + return retval;
55749 +} /* End twl_allocate_memory() */
55750 +
55751 +/* This function will load the request id and various sgls for ioctls */
55752 +static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
55753 +{
55754 + TW_Command *oldcommand;
55755 + TW_Command_Apache *newcommand;
55756 + TW_SG_Entry_ISO *sgl;
55757 + unsigned int pae = 0;
55758 +
55759 + if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
55760 + pae = 1;
55761 +
55762 + if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
55763 + newcommand = &full_command_packet->command.newcommand;
55764 + newcommand->request_id__lunl =
55765 + cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
55766 + if (length) {
55767 + newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
55768 + newcommand->sg_list[0].length = TW_CPU_TO_SGL(length);
55769 + }
55770 + newcommand->sgl_entries__lunh =
55771 + cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
55772 + } else {
55773 + oldcommand = &full_command_packet->command.oldcommand;
55774 + oldcommand->request_id = request_id;
55775 +
55776 + if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
55777 + /* Load the sg list */
55778 + sgl = (TW_SG_Entry_ISO *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry_ISO)/4) + pae + (sizeof(dma_addr_t) > 4 ? 1 : 0));
55779 + sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
55780 + sgl->length = TW_CPU_TO_SGL(length);
55781 + oldcommand->size += pae;
55782 + oldcommand->size += sizeof(dma_addr_t) > 4 ? 1 : 0;
55783 + }
55784 + }
55785 +} /* End twl_load_sgl() */
55786 +
55787 +/* This function handles ioctl for the character device
55788 + This interface is used by smartmontools open source software */
55789 +static int twl_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
55790 +{
55791 + long timeout;
55792 + unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
55793 + dma_addr_t dma_handle;
55794 + int request_id = 0;
55795 + TW_Ioctl_Driver_Command driver_command;
55796 + TW_Ioctl_Buf_Apache *tw_ioctl;
55797 + TW_Command_Full *full_command_packet;
55798 + TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)];
55799 + int retval = -EFAULT;
55800 + void __user *argp = (void __user *)arg;
55801 +
55802 + /* Only let one of these through at a time */
55803 + if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
55804 + retval = -EINTR;
55805 + goto out;
55806 + }
55807 +
55808 + /* First copy down the driver command */
55809 + if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
55810 + goto out2;
55811 +
55812 + /* Check data buffer size */
55813 + if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
55814 + retval = -EINVAL;
55815 + goto out2;
55816 + }
55817 +
55818 + /* Hardware can only do multiple of 512 byte transfers */
55819 + data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
55820 +
55821 + /* Now allocate ioctl buf memory */
55822 + cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
55823 + if (!cpu_addr) {
55824 + retval = -ENOMEM;
55825 + goto out2;
55826 + }
55827 +
55828 + tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
55829 +
55830 + /* Now copy down the entire ioctl */
55831 + if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
55832 + goto out3;
55833 +
55834 + /* See which ioctl we are doing */
55835 + switch (cmd) {
55836 + case TW_IOCTL_FIRMWARE_PASS_THROUGH:
55837 + spin_lock_irqsave(tw_dev->host->host_lock, flags);
55838 + twl_get_request_id(tw_dev, &request_id);
55839 +
55840 + /* Flag internal command */
55841 + tw_dev->srb[request_id] = NULL;
55842 +
55843 + /* Flag chrdev ioctl */
55844 + tw_dev->chrdev_request_id = request_id;
55845 +
55846 + full_command_packet = (TW_Command_Full *)&tw_ioctl->firmware_command;
55847 +
55848 + /* Load request id and sglist for both command types */
55849 + twl_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
55850 +
55851 + memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
55852 +
55853 + /* Now post the command packet to the controller */
55854 + twl_post_command_packet(tw_dev, request_id);
55855 + spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55856 +
55857 + timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
55858 +
55859 + /* Now wait for command to complete */
55860 + timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
55861 +
55862 + /* We timed out, and didn't get an interrupt */
55863 + if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
55864 + /* Now we need to reset the board */
55865 + printk(KERN_WARNING "3w-sas: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
55866 + tw_dev->host->host_no, TW_DRIVER, 0x6,
55867 + cmd);
55868 + retval = -EIO;
55869 + twl_reset_device_extension(tw_dev, 1);
55870 + goto out3;
55871 + }
55872 +
55873 + /* Now copy in the command packet response */
55874 + memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
55875 +
55876 + /* Now complete the io */
55877 + spin_lock_irqsave(tw_dev->host->host_lock, flags);
55878 + tw_dev->posted_request_count--;
55879 + tw_dev->state[request_id] = TW_S_COMPLETED;
55880 + twl_free_request_id(tw_dev, request_id);
55881 + spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55882 + break;
55883 + default:
55884 + retval = -ENOTTY;
55885 + goto out3;
55886 + }
55887 +
55888 + /* Now copy the entire response to userspace */
55889 + if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
55890 + retval = 0;
55891 +out3:
55892 + /* Now free ioctl buf memory */
55893 + dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
55894 +out2:
55895 + mutex_unlock(&tw_dev->ioctl_lock);
55896 +out:
55897 + return retval;
55898 +} /* End twl_chrdev_ioctl() */
55899 +
55900 +/* This function handles open for the character device */
55901 +static int twl_chrdev_open(struct inode *inode, struct file *file)
55902 +{
55903 + unsigned int minor_number;
55904 + int retval = -ENODEV;
55905 +
55906 + if (!capable(CAP_SYS_ADMIN)) {
55907 + retval = -EACCES;
55908 + goto out;
55909 + }
55910 +
55911 + cycle_kernel_lock();
55912 + minor_number = iminor(inode);
55913 + if (minor_number >= twl_device_extension_count)
55914 + goto out;
55915 + retval = 0;
55916 +out:
55917 + return retval;
55918 +} /* End twl_chrdev_open() */
55919 +
55920 +/* File operations struct for character device */
55921 +static const struct file_operations twl_fops = {
55922 + .owner = THIS_MODULE,
55923 + .ioctl = twl_chrdev_ioctl,
55924 + .open = twl_chrdev_open,
55925 + .release = NULL
55926 +};
55927 +
55928 +/* This function passes sense data from firmware to scsi layer */
55929 +static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, int copy_sense, int print_host)
55930 +{
55931 + TW_Command_Apache_Header *header;
55932 + TW_Command_Full *full_command_packet;
55933 + unsigned short error;
55934 + char *error_str;
55935 + int retval = 1;
55936 +
55937 + header = tw_dev->sense_buffer_virt[i];
55938 + full_command_packet = tw_dev->command_packet_virt[request_id];
55939 +
55940 + /* Get embedded firmware error string */
55941 + error_str = &(header->err_specific_desc[strlen(header->err_specific_desc) + 1]);
55942 +
55943 + /* Don't print error for Logical unit not supported during rollcall */
55944 + error = le16_to_cpu(header->status_block.error);
55945 + if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE) && (error != TW_ERROR_INVALID_FIELD_IN_CDB)) {
55946 + if (print_host)
55947 + printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
55948 + tw_dev->host->host_no,
55949 + TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
55950 + header->status_block.error,
55951 + error_str,
55952 + header->err_specific_desc);
55953 + else
55954 + printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s:%s.\n",
55955 + TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
55956 + header->status_block.error,
55957 + error_str,
55958 + header->err_specific_desc);
55959 + }
55960 +
55961 + if (copy_sense) {
55962 + memcpy(tw_dev->srb[request_id]->sense_buffer, header->sense_data, TW_SENSE_DATA_LENGTH);
55963 + tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
55964 + goto out;
55965 + }
55966 +out:
55967 + return retval;
55968 +} /* End twl_fill_sense() */
55969 +
55970 +/* This function will free up device extension resources */
55971 +static void twl_free_device_extension(TW_Device_Extension *tw_dev)
55972 +{
55973 + if (tw_dev->command_packet_virt[0])
55974 + pci_free_consistent(tw_dev->tw_pci_dev,
55975 + sizeof(TW_Command_Full)*TW_Q_LENGTH,
55976 + tw_dev->command_packet_virt[0],
55977 + tw_dev->command_packet_phys[0]);
55978 +
55979 + if (tw_dev->generic_buffer_virt[0])
55980 + pci_free_consistent(tw_dev->tw_pci_dev,
55981 + TW_SECTOR_SIZE*TW_Q_LENGTH,
55982 + tw_dev->generic_buffer_virt[0],
55983 + tw_dev->generic_buffer_phys[0]);
55984 +
55985 + if (tw_dev->sense_buffer_virt[0])
55986 + pci_free_consistent(tw_dev->tw_pci_dev,
55987 + sizeof(TW_Command_Apache_Header)*
55988 + TW_Q_LENGTH,
55989 + tw_dev->sense_buffer_virt[0],
55990 + tw_dev->sense_buffer_phys[0]);
55991 +
55992 + kfree(tw_dev->event_queue[0]);
55993 +} /* End twl_free_device_extension() */
55994 +
55995 +/* This function will get parameter table entries from the firmware */
55996 +static void *twl_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
55997 +{
55998 + TW_Command_Full *full_command_packet;
55999 + TW_Command *command_packet;
56000 + TW_Param_Apache *param;
56001 + void *retval = NULL;
56002 +
56003 + /* Setup the command packet */
56004 + full_command_packet = tw_dev->command_packet_virt[request_id];
56005 + memset(full_command_packet, 0, sizeof(TW_Command_Full));
56006 + command_packet = &full_command_packet->command.oldcommand;
56007 +
56008 + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
56009 + command_packet->size = TW_COMMAND_SIZE;
56010 + command_packet->request_id = request_id;
56011 + command_packet->byte6_offset.block_count = cpu_to_le16(1);
56012 +
56013 + /* Now setup the param */
56014 + param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
56015 + memset(param, 0, TW_SECTOR_SIZE);
56016 + param->table_id = cpu_to_le16(table_id | 0x8000);
56017 + param->parameter_id = cpu_to_le16(parameter_id);
56018 + param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
56019 +
56020 + command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
56021 + command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
56022 +
56023 + /* Post the command packet to the board */
56024 + twl_post_command_packet(tw_dev, request_id);
56025 +
56026 + /* Poll for completion */
56027 + if (twl_poll_response(tw_dev, request_id, 30))
56028 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "No valid response during get param")
56029 + else
56030 + retval = (void *)&(param->data[0]);
56031 +
56032 + tw_dev->posted_request_count--;
56033 + tw_dev->state[request_id] = TW_S_INITIAL;
56034 +
56035 + return retval;
56036 +} /* End twl_get_param() */
56037 +
56038 +/* This function will send an initconnection command to controller */
56039 +static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits,
56040 + u32 set_features, unsigned short current_fw_srl,
56041 + unsigned short current_fw_arch_id,
56042 + unsigned short current_fw_branch,
56043 + unsigned short current_fw_build,
56044 + unsigned short *fw_on_ctlr_srl,
56045 + unsigned short *fw_on_ctlr_arch_id,
56046 + unsigned short *fw_on_ctlr_branch,
56047 + unsigned short *fw_on_ctlr_build,
56048 + u32 *init_connect_result)
56049 +{
56050 + TW_Command_Full *full_command_packet;
56051 + TW_Initconnect *tw_initconnect;
56052 + int request_id = 0, retval = 1;
56053 +
56054 + /* Initialize InitConnection command packet */
56055 + full_command_packet = tw_dev->command_packet_virt[request_id];
56056 + memset(full_command_packet, 0, sizeof(TW_Command_Full));
56057 + full_command_packet->header.header_desc.size_header = 128;
56058 +
56059 + tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
56060 + tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
56061 + tw_initconnect->request_id = request_id;
56062 + tw_initconnect->message_credits = cpu_to_le16(message_credits);
56063 + tw_initconnect->features = set_features;
56064 +
56065 + /* Turn on 64-bit sgl support if we need to */
56066 + tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
56067 +
56068 + tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
56069 +
56070 + if (set_features & TW_EXTENDED_INIT_CONNECT) {
56071 + tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
56072 + tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
56073 + tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
56074 + tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
56075 + tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
56076 + } else
56077 + tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
56078 +
56079 + /* Send command packet to the board */
56080 + twl_post_command_packet(tw_dev, request_id);
56081 +
56082 + /* Poll for completion */
56083 + if (twl_poll_response(tw_dev, request_id, 30)) {
56084 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x8, "No valid response during init connection");
56085 + } else {
56086 + if (set_features & TW_EXTENDED_INIT_CONNECT) {
56087 + *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
56088 + *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
56089 + *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
56090 + *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
56091 + *init_connect_result = le32_to_cpu(tw_initconnect->result);
56092 + }
56093 + retval = 0;
56094 + }
56095 +
56096 + tw_dev->posted_request_count--;
56097 + tw_dev->state[request_id] = TW_S_INITIAL;
56098 +
56099 + return retval;
56100 +} /* End twl_initconnection() */
56101 +
56102 +/* This function will initialize the fields of a device extension */
56103 +static int twl_initialize_device_extension(TW_Device_Extension *tw_dev)
56104 +{
56105 + int i, retval = 1;
56106 +
56107 + /* Initialize command packet buffers */
56108 + if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
56109 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x9, "Command packet memory allocation failed");
56110 + goto out;
56111 + }
56112 +
56113 + /* Initialize generic buffer */
56114 + if (twl_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
56115 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Generic memory allocation failed");
56116 + goto out;
56117 + }
56118 +
56119 + /* Allocate sense buffers */
56120 + if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Apache_Header), 2)) {
56121 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xb, "Sense buffer allocation failed");
56122 + goto out;
56123 + }
56124 +
56125 + /* Allocate event info space */
56126 + tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
56127 + if (!tw_dev->event_queue[0]) {
56128 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "Event info memory allocation failed");
56129 + goto out;
56130 + }
56131 +
56132 + for (i = 0; i < TW_Q_LENGTH; i++) {
56133 + tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
56134 + tw_dev->free_queue[i] = i;
56135 + tw_dev->state[i] = TW_S_INITIAL;
56136 + }
56137 +
56138 + tw_dev->free_head = TW_Q_START;
56139 + tw_dev->free_tail = TW_Q_START;
56140 + tw_dev->error_sequence_id = 1;
56141 + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
56142 +
56143 + mutex_init(&tw_dev->ioctl_lock);
56144 + init_waitqueue_head(&tw_dev->ioctl_wqueue);
56145 +
56146 + retval = 0;
56147 +out:
56148 + return retval;
56149 +} /* End twl_initialize_device_extension() */
56150 +
56151 +/* This function will perform a pci-dma unmap */
56152 +static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
56153 +{
56154 + struct scsi_cmnd *cmd = tw_dev->srb[request_id];
56155 +
56156 + if (cmd->SCp.phase == TW_PHASE_SGLIST)
56157 + scsi_dma_unmap(cmd);
56158 +} /* End twl_unmap_scsi_data() */
56159 +
56160 +/* This function will handle attention interrupts */
56161 +static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
56162 +{
56163 + int retval = 1;
56164 + u32 request_id, doorbell;
56165 +
56166 + /* Read doorbell status */
56167 + doorbell = readl(TWL_HOBDB_REG_ADDR(tw_dev));
56168 +
56169 + /* Check for controller errors */
56170 + if (doorbell & TWL_DOORBELL_CONTROLLER_ERROR) {
56171 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "Microcontroller Error: clearing");
56172 + goto out;
56173 + }
56174 +
56175 + /* Check if we need to perform an AEN drain */
56176 + if (doorbell & TWL_DOORBELL_ATTENTION_INTERRUPT) {
56177 + if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
56178 + twl_get_request_id(tw_dev, &request_id);
56179 + if (twl_aen_read_queue(tw_dev, request_id)) {
56180 + tw_dev->state[request_id] = TW_S_COMPLETED;
56181 + twl_free_request_id(tw_dev, request_id);
56182 + clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
56183 + }
56184 + }
56185 + }
56186 +
56187 + retval = 0;
56188 +out:
56189 + /* Clear doorbell interrupt */
56190 + TWL_CLEAR_DB_INTERRUPT(tw_dev);
56191 +
56192 + /* Make sure the clear was flushed by reading it back */
56193 + readl(TWL_HOBDBC_REG_ADDR(tw_dev));
56194 +
56195 + return retval;
56196 +} /* End twl_handle_attention_interrupt() */
56197 +
56198 +/* Interrupt service routine */
56199 +static irqreturn_t twl_interrupt(int irq, void *dev_instance)
56200 +{
56201 + TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
56202 + int i, handled = 0, error = 0;
56203 + dma_addr_t mfa = 0;
56204 + u32 reg, regl, regh, response, request_id = 0;
56205 + struct scsi_cmnd *cmd;
56206 + TW_Command_Full *full_command_packet;
56207 +
56208 + spin_lock(tw_dev->host->host_lock);
56209 +
56210 + /* Read host interrupt status */
56211 + reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
56212 +
56213 + /* Check if this is our interrupt, otherwise bail */
56214 + if (!(reg & TWL_HISTATUS_VALID_INTERRUPT))
56215 + goto twl_interrupt_bail;
56216 +
56217 + handled = 1;
56218 +
56219 + /* If we are resetting, bail */
56220 + if (test_bit(TW_IN_RESET, &tw_dev->flags))
56221 + goto twl_interrupt_bail;
56222 +
56223 + /* Attention interrupt */
56224 + if (reg & TWL_HISTATUS_ATTENTION_INTERRUPT) {
56225 + if (twl_handle_attention_interrupt(tw_dev)) {
56226 + TWL_MASK_INTERRUPTS(tw_dev);
56227 + goto twl_interrupt_bail;
56228 + }
56229 + }
56230 +
56231 + /* Response interrupt */
56232 + while (reg & TWL_HISTATUS_RESPONSE_INTERRUPT) {
56233 + if (sizeof(dma_addr_t) > 4) {
56234 + regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
56235 + regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
56236 + mfa = ((u64)regh << 32) | regl;
56237 + } else
56238 + mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
56239 +
56240 + error = 0;
56241 + response = (u32)mfa;
56242 +
56243 + /* Check for command packet error */
56244 + if (!TW_NOTMFA_OUT(response)) {
56245 + for (i=0;i<TW_Q_LENGTH;i++) {
56246 + if (tw_dev->sense_buffer_phys[i] == mfa) {
56247 + request_id = le16_to_cpu(tw_dev->sense_buffer_virt[i]->header_desc.request_id);
56248 + if (tw_dev->srb[request_id] != NULL)
56249 + error = twl_fill_sense(tw_dev, i, request_id, 1, 1);
56250 + else {
56251 + /* Skip ioctl error prints */
56252 + if (request_id != tw_dev->chrdev_request_id)
56253 + error = twl_fill_sense(tw_dev, i, request_id, 0, 1);
56254 + else
56255 + memcpy(tw_dev->command_packet_virt[request_id], tw_dev->sense_buffer_virt[i], sizeof(TW_Command_Apache_Header));
56256 + }
56257 +
56258 + /* Now re-post the sense buffer */
56259 + writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
56260 + writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
56261 + break;
56262 + }
56263 + }
56264 + } else
56265 + request_id = TW_RESID_OUT(response);
56266 +
56267 + full_command_packet = tw_dev->command_packet_virt[request_id];
56268 +
56269 + /* Check for correct state */
56270 + if (tw_dev->state[request_id] != TW_S_POSTED) {
56271 + if (tw_dev->srb[request_id] != NULL) {
56272 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Received a request id that wasn't posted");
56273 + TWL_MASK_INTERRUPTS(tw_dev);
56274 + goto twl_interrupt_bail;
56275 + }
56276 + }
56277 +
56278 + /* Check for internal command completion */
56279 + if (tw_dev->srb[request_id] == NULL) {
56280 + if (request_id != tw_dev->chrdev_request_id) {
56281 + if (twl_aen_complete(tw_dev, request_id))
56282 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "Error completing AEN during attention interrupt");
56283 + } else {
56284 + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
56285 + wake_up(&tw_dev->ioctl_wqueue);
56286 + }
56287 + } else {
56288 + cmd = tw_dev->srb[request_id];
56289 +
56290 + if (!error)
56291 + cmd->result = (DID_OK << 16);
56292 +
56293 + /* Report residual bytes for single sgl */
56294 + if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
56295 + if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
56296 + scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
56297 + }
56298 +
56299 + /* Now complete the io */
56300 + tw_dev->state[request_id] = TW_S_COMPLETED;
56301 + twl_free_request_id(tw_dev, request_id);
56302 + tw_dev->posted_request_count--;
56303 + tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
56304 + twl_unmap_scsi_data(tw_dev, request_id);
56305 + }
56306 +
56307 + /* Check for another response interrupt */
56308 + reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
56309 + }
56310 +
56311 +twl_interrupt_bail:
56312 + spin_unlock(tw_dev->host->host_lock);
56313 + return IRQ_RETVAL(handled);
56314 +} /* End twl_interrupt() */
56315 +
56316 +/* This function will poll for a register change */
56317 +static int twl_poll_register(TW_Device_Extension *tw_dev, void *reg, u32 value, u32 result, int seconds)
56318 +{
56319 + unsigned long before;
56320 + int retval = 1;
56321 + u32 reg_value;
56322 +
56323 + reg_value = readl(reg);
56324 + before = jiffies;
56325 +
56326 + while ((reg_value & value) != result) {
56327 + reg_value = readl(reg);
56328 + if (time_after(jiffies, before + HZ * seconds))
56329 + goto out;
56330 + msleep(50);
56331 + }
56332 + retval = 0;
56333 +out:
56334 + return retval;
56335 +} /* End twl_poll_register() */
56336 +
56337 +/* This function will reset a controller */
56338 +static int twl_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
56339 +{
56340 + int retval = 1;
56341 + int i = 0;
56342 + u32 status = 0;
56343 + unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
56344 + unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
56345 + u32 init_connect_result = 0;
56346 + int tries = 0;
56347 + int do_soft_reset = soft_reset;
56348 +
56349 + while (tries < TW_MAX_RESET_TRIES) {
56350 + /* Do a soft reset if one is needed */
56351 + if (do_soft_reset) {
56352 + TWL_SOFT_RESET(tw_dev);
56353 +
56354 + /* Make sure controller is in a good state */
56355 + if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, 0x0, 30)) {
56356 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Controller never went non-ready during reset sequence");
56357 + tries++;
56358 + continue;
56359 + }
56360 + if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, TWL_CONTROLLER_READY, 60)) {
56361 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x11, "Controller not ready during reset sequence");
56362 + tries++;
56363 + continue;
56364 + }
56365 + }
56366 +
56367 + /* Initconnect */
56368 + if (twl_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
56369 + TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
56370 + TW_9750_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
56371 + TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
56372 + &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
56373 + &fw_on_ctlr_build, &init_connect_result)) {
56374 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x12, "Initconnection failed while checking SRL");
56375 + do_soft_reset = 1;
56376 + tries++;
56377 + continue;
56378 + }
56379 +
56380 + /* Load sense buffers */
56381 + while (i < TW_Q_LENGTH) {
56382 + writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
56383 + writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
56384 +
56385 + /* Check status for over-run after each write */
56386 + status = readl(TWL_STATUS_REG_ADDR(tw_dev));
56387 + if (!(status & TWL_STATUS_OVERRUN_SUBMIT))
56388 + i++;
56389 + }
56390 +
56391 + /* Now check status */
56392 + status = readl(TWL_STATUS_REG_ADDR(tw_dev));
56393 + if (status) {
56394 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "Bad controller status after loading sense buffers");
56395 + do_soft_reset = 1;
56396 + tries++;
56397 + continue;
56398 + }
56399 +
56400 + /* Drain the AEN queue */
56401 + if (twl_aen_drain_queue(tw_dev, soft_reset)) {
56402 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x14, "AEN drain failed during reset sequence");
56403 + do_soft_reset = 1;
56404 + tries++;
56405 + continue;
56406 + }
56407 +
56408 + /* Load rest of compatibility struct */
56409 + strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
56410 + tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
56411 + tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
56412 + tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
56413 + tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
56414 + tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
56415 + tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
56416 + tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
56417 + tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
56418 + tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
56419 +
56420 + /* If we got here, controller is in a good state */
56421 + retval = 0;
56422 + goto out;
56423 + }
56424 +out:
56425 + return retval;
56426 +} /* End twl_reset_sequence() */
56427 +
56428 +/* This function will reset a device extension */
56429 +static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset)
56430 +{
56431 + int i = 0, retval = 1;
56432 + unsigned long flags = 0;
56433 +
56434 + /* Block SCSI requests while we are resetting */
56435 + if (ioctl_reset)
56436 + scsi_block_requests(tw_dev->host);
56437 +
56438 + set_bit(TW_IN_RESET, &tw_dev->flags);
56439 + TWL_MASK_INTERRUPTS(tw_dev);
56440 + TWL_CLEAR_DB_INTERRUPT(tw_dev);
56441 +
56442 + spin_lock_irqsave(tw_dev->host->host_lock, flags);
56443 +
56444 + /* Abort all requests that are in progress */
56445 + for (i = 0; i < TW_Q_LENGTH; i++) {
56446 + if ((tw_dev->state[i] != TW_S_FINISHED) &&
56447 + (tw_dev->state[i] != TW_S_INITIAL) &&
56448 + (tw_dev->state[i] != TW_S_COMPLETED)) {
56449 + if (tw_dev->srb[i]) {
56450 + tw_dev->srb[i]->result = (DID_RESET << 16);
56451 + tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
56452 + twl_unmap_scsi_data(tw_dev, i);
56453 + }
56454 + }
56455 + }
56456 +
56457 + /* Reset queues and counts */
56458 + for (i = 0; i < TW_Q_LENGTH; i++) {
56459 + tw_dev->free_queue[i] = i;
56460 + tw_dev->state[i] = TW_S_INITIAL;
56461 + }
56462 + tw_dev->free_head = TW_Q_START;
56463 + tw_dev->free_tail = TW_Q_START;
56464 + tw_dev->posted_request_count = 0;
56465 +
56466 + spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
56467 +
56468 + if (twl_reset_sequence(tw_dev, 1))
56469 + goto out;
56470 +
56471 + TWL_UNMASK_INTERRUPTS(tw_dev);
56472 +
56473 + clear_bit(TW_IN_RESET, &tw_dev->flags);
56474 + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
56475 +
56476 + retval = 0;
56477 +out:
56478 + if (ioctl_reset)
56479 + scsi_unblock_requests(tw_dev->host);
56480 + return retval;
56481 +} /* End twl_reset_device_extension() */
56482 +
56483 +/* This funciton returns unit geometry in cylinders/heads/sectors */
56484 +static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
56485 +{
56486 + int heads, sectors, cylinders;
56487 + TW_Device_Extension *tw_dev;
56488 +
56489 + tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
56490 +
56491 + if (capacity >= 0x200000) {
56492 + heads = 255;
56493 + sectors = 63;
56494 + cylinders = sector_div(capacity, heads * sectors);
56495 + } else {
56496 + heads = 64;
56497 + sectors = 32;
56498 + cylinders = sector_div(capacity, heads * sectors);
56499 + }
56500 +
56501 + geom[0] = heads;
56502 + geom[1] = sectors;
56503 + geom[2] = cylinders;
56504 +
56505 + return 0;
56506 +} /* End twl_scsi_biosparam() */
56507 +
56508 +/* This is the new scsi eh reset function */
56509 +static int twl_scsi_eh_reset(struct scsi_cmnd *SCpnt)
56510 +{
56511 + TW_Device_Extension *tw_dev = NULL;
56512 + int retval = FAILED;
56513 +
56514 + tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
56515 +
56516 + tw_dev->num_resets++;
56517 +
56518 + sdev_printk(KERN_WARNING, SCpnt->device,
56519 + "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
56520 + TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
56521 +
56522 + /* Make sure we are not issuing an ioctl or resetting from ioctl */
56523 + mutex_lock(&tw_dev->ioctl_lock);
56524 +
56525 + /* Now reset the card and some of the device extension data */
56526 + if (twl_reset_device_extension(tw_dev, 0)) {
56527 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "Controller reset failed during scsi host reset");
56528 + goto out;
56529 + }
56530 +
56531 + retval = SUCCESS;
56532 +out:
56533 + mutex_unlock(&tw_dev->ioctl_lock);
56534 + return retval;
56535 +} /* End twl_scsi_eh_reset() */
56536 +
56537 +/* This is the main scsi queue function to handle scsi opcodes */
56538 +static int twl_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
56539 +{
56540 + int request_id, retval;
56541 + TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
56542 +
56543 + /* If we are resetting due to timed out ioctl, report as busy */
56544 + if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
56545 + retval = SCSI_MLQUEUE_HOST_BUSY;
56546 + goto out;
56547 + }
56548 +
56549 + /* Save done function into scsi_cmnd struct */
56550 + SCpnt->scsi_done = done;
56551 +
56552 + /* Get a free request id */
56553 + twl_get_request_id(tw_dev, &request_id);
56554 +
56555 + /* Save the scsi command for use by the ISR */
56556 + tw_dev->srb[request_id] = SCpnt;
56557 +
56558 + /* Initialize phase to zero */
56559 + SCpnt->SCp.phase = TW_PHASE_INITIAL;
56560 +
56561 + retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
56562 + if (retval) {
56563 + tw_dev->state[request_id] = TW_S_COMPLETED;
56564 + twl_free_request_id(tw_dev, request_id);
56565 + SCpnt->result = (DID_ERROR << 16);
56566 + done(SCpnt);
56567 + retval = 0;
56568 + }
56569 +out:
56570 + return retval;
56571 +} /* End twl_scsi_queue() */
56572 +
56573 +/* This function tells the controller to shut down */
56574 +static void __twl_shutdown(TW_Device_Extension *tw_dev)
56575 +{
56576 + /* Disable interrupts */
56577 + TWL_MASK_INTERRUPTS(tw_dev);
56578 +
56579 + /* Free up the IRQ */
56580 + free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
56581 +
56582 + printk(KERN_WARNING "3w-sas: Shutting down host %d.\n", tw_dev->host->host_no);
56583 +
56584 + /* Tell the card we are shutting down */
56585 + if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
56586 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Connection shutdown failed");
56587 + } else {
56588 + printk(KERN_WARNING "3w-sas: Shutdown complete.\n");
56589 + }
56590 +
56591 + /* Clear doorbell interrupt just before exit */
56592 + TWL_CLEAR_DB_INTERRUPT(tw_dev);
56593 +} /* End __twl_shutdown() */
56594 +
56595 +/* Wrapper for __twl_shutdown */
56596 +static void twl_shutdown(struct pci_dev *pdev)
56597 +{
56598 + struct Scsi_Host *host = pci_get_drvdata(pdev);
56599 + TW_Device_Extension *tw_dev;
56600 +
56601 + if (!host)
56602 + return;
56603 +
56604 + tw_dev = (TW_Device_Extension *)host->hostdata;
56605 +
56606 + if (tw_dev->online)
56607 + __twl_shutdown(tw_dev);
56608 +} /* End twl_shutdown() */
56609 +
56610 +/* This function configures unit settings when a unit is coming on-line */
56611 +static int twl_slave_configure(struct scsi_device *sdev)
56612 +{
56613 + /* Force 60 second timeout */
56614 + blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
56615 +
56616 + return 0;
56617 +} /* End twl_slave_configure() */
56618 +
56619 +/* scsi_host_template initializer */
56620 +static struct scsi_host_template driver_template = {
56621 + .module = THIS_MODULE,
56622 + .name = "3w-sas",
56623 + .queuecommand = twl_scsi_queue,
56624 + .eh_host_reset_handler = twl_scsi_eh_reset,
56625 + .bios_param = twl_scsi_biosparam,
56626 + .change_queue_depth = twl_change_queue_depth,
56627 + .can_queue = TW_Q_LENGTH-2,
56628 + .slave_configure = twl_slave_configure,
56629 + .this_id = -1,
56630 + .sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH,
56631 + .max_sectors = TW_MAX_SECTORS,
56632 + .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
56633 + .use_clustering = ENABLE_CLUSTERING,
56634 + .shost_attrs = twl_host_attrs,
56635 + .emulated = 1
56636 +};
56637 +
56638 +/* This function will probe and initialize a card */
56639 +static int __devinit twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
56640 +{
56641 + struct Scsi_Host *host = NULL;
56642 + TW_Device_Extension *tw_dev;
56643 + resource_size_t mem_addr, mem_len;
56644 + int retval = -ENODEV;
56645 + int *ptr_phycount, phycount=0;
56646 +
56647 + retval = pci_enable_device(pdev);
56648 + if (retval) {
56649 + TW_PRINTK(host, TW_DRIVER, 0x17, "Failed to enable pci device");
56650 + goto out_disable_device;
56651 + }
56652 +
56653 + pci_set_master(pdev);
56654 + pci_try_set_mwi(pdev);
56655 +
56656 + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
56657 + || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
56658 + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
56659 + || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
56660 + TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
56661 + retval = -ENODEV;
56662 + goto out_disable_device;
56663 + }
56664 +
56665 + host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
56666 + if (!host) {
56667 + TW_PRINTK(host, TW_DRIVER, 0x19, "Failed to allocate memory for device extension");
56668 + retval = -ENOMEM;
56669 + goto out_disable_device;
56670 + }
56671 + tw_dev = (TW_Device_Extension *)host->hostdata;
56672 +
56673 + /* Save values to device extension */
56674 + tw_dev->host = host;
56675 + tw_dev->tw_pci_dev = pdev;
56676 +
56677 + if (twl_initialize_device_extension(tw_dev)) {
56678 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
56679 + goto out_free_device_extension;
56680 + }
56681 +
56682 + /* Request IO regions */
56683 + retval = pci_request_regions(pdev, "3w-sas");
56684 + if (retval) {
56685 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Failed to get mem region");
56686 + goto out_free_device_extension;
56687 + }
56688 +
56689 + /* Use region 1 */
56690 + mem_addr = pci_resource_start(pdev, 1);
56691 + mem_len = pci_resource_len(pdev, 1);
56692 +
56693 + /* Save base address */
56694 + tw_dev->base_addr = ioremap(mem_addr, mem_len);
56695 +
56696 + if (!tw_dev->base_addr) {
56697 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
56698 + goto out_release_mem_region;
56699 + }
56700 +
56701 + /* Disable interrupts on the card */
56702 + TWL_MASK_INTERRUPTS(tw_dev);
56703 +
56704 + /* Initialize the card */
56705 + if (twl_reset_sequence(tw_dev, 0)) {
56706 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
56707 + goto out_iounmap;
56708 + }
56709 +
56710 + /* Set host specific parameters */
56711 + host->max_id = TW_MAX_UNITS;
56712 + host->max_cmd_len = TW_MAX_CDB_LEN;
56713 + host->max_lun = TW_MAX_LUNS;
56714 + host->max_channel = 0;
56715 +
56716 + /* Register the card with the kernel SCSI layer */
56717 + retval = scsi_add_host(host, &pdev->dev);
56718 + if (retval) {
56719 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "scsi add host failed");
56720 + goto out_iounmap;
56721 + }
56722 +
56723 + pci_set_drvdata(pdev, host);
56724 +
56725 + printk(KERN_WARNING "3w-sas: scsi%d: Found an LSI 3ware %s Controller at 0x%llx, IRQ: %d.\n",
56726 + host->host_no,
56727 + (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
56728 + TW_PARAM_MODEL, TW_PARAM_MODEL_LENGTH),
56729 + (u64)mem_addr, pdev->irq);
56730 +
56731 + ptr_phycount = twl_get_param(tw_dev, 2, TW_PARAM_PHY_SUMMARY_TABLE,
56732 + TW_PARAM_PHYCOUNT, TW_PARAM_PHYCOUNT_LENGTH);
56733 + if (ptr_phycount)
56734 + phycount = le32_to_cpu(*(int *)ptr_phycount);
56735 +
56736 + printk(KERN_WARNING "3w-sas: scsi%d: Firmware %s, BIOS %s, Phys: %d.\n",
56737 + host->host_no,
56738 + (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
56739 + TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
56740 + (char *)twl_get_param(tw_dev, 2, TW_VERSION_TABLE,
56741 + TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
56742 + phycount);
56743 +
56744 + /* Try to enable MSI */
56745 + if (use_msi && !pci_enable_msi(pdev))
56746 + set_bit(TW_USING_MSI, &tw_dev->flags);
56747 +
56748 + /* Now setup the interrupt handler */
56749 + retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
56750 + if (retval) {
56751 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Error requesting IRQ");
56752 + goto out_remove_host;
56753 + }
56754 +
56755 + twl_device_extension_list[twl_device_extension_count] = tw_dev;
56756 + twl_device_extension_count++;
56757 +
56758 + /* Re-enable interrupts on the card */
56759 + TWL_UNMASK_INTERRUPTS(tw_dev);
56760 +
56761 + /* Finally, scan the host */
56762 + scsi_scan_host(host);
56763 +
56764 + /* Add sysfs binary files */
56765 + if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr))
56766 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Failed to create sysfs binary file: 3ware_aen_read");
56767 + if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr))
56768 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Failed to create sysfs binary file: 3ware_compat_info");
56769 +
56770 + if (twl_major == -1) {
56771 + if ((twl_major = register_chrdev (0, "twl", &twl_fops)) < 0)
56772 + TW_PRINTK(host, TW_DRIVER, 0x22, "Failed to register character device");
56773 + }
56774 + tw_dev->online = 1;
56775 + return 0;
56776 +
56777 +out_remove_host:
56778 + if (test_bit(TW_USING_MSI, &tw_dev->flags))
56779 + pci_disable_msi(pdev);
56780 + scsi_remove_host(host);
56781 +out_iounmap:
56782 + iounmap(tw_dev->base_addr);
56783 +out_release_mem_region:
56784 + pci_release_regions(pdev);
56785 +out_free_device_extension:
56786 + twl_free_device_extension(tw_dev);
56787 + scsi_host_put(host);
56788 +out_disable_device:
56789 + pci_disable_device(pdev);
56790 +
56791 + return retval;
56792 +} /* End twl_probe() */
56793 +
56794 +/* This function is called to remove a device */
56795 +static void twl_remove(struct pci_dev *pdev)
56796 +{
56797 + struct Scsi_Host *host = pci_get_drvdata(pdev);
56798 + TW_Device_Extension *tw_dev;
56799 +
56800 + if (!host)
56801 + return;
56802 +
56803 + tw_dev = (TW_Device_Extension *)host->hostdata;
56804 +
56805 + if (!tw_dev->online)
56806 + return;
56807 +
56808 + /* Remove sysfs binary files */
56809 + sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr);
56810 + sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr);
56811 +
56812 + scsi_remove_host(tw_dev->host);
56813 +
56814 + /* Unregister character device */
56815 + if (twl_major >= 0) {
56816 + unregister_chrdev(twl_major, "twl");
56817 + twl_major = -1;
56818 + }
56819 +
56820 + /* Shutdown the card */
56821 + __twl_shutdown(tw_dev);
56822 +
56823 + /* Disable MSI if enabled */
56824 + if (test_bit(TW_USING_MSI, &tw_dev->flags))
56825 + pci_disable_msi(pdev);
56826 +
56827 + /* Free IO remapping */
56828 + iounmap(tw_dev->base_addr);
56829 +
56830 + /* Free up the mem region */
56831 + pci_release_regions(pdev);
56832 +
56833 + /* Free up device extension resources */
56834 + twl_free_device_extension(tw_dev);
56835 +
56836 + scsi_host_put(tw_dev->host);
56837 + pci_disable_device(pdev);
56838 + twl_device_extension_count--;
56839 +} /* End twl_remove() */
56840 +
56841 +#ifdef CONFIG_PM
56842 +/* This function is called on PCI suspend */
56843 +static int twl_suspend(struct pci_dev *pdev, pm_message_t state)
56844 +{
56845 + struct Scsi_Host *host = pci_get_drvdata(pdev);
56846 + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
56847 +
56848 + printk(KERN_WARNING "3w-sas: Suspending host %d.\n", tw_dev->host->host_no);
56849 + /* Disable interrupts */
56850 + TWL_MASK_INTERRUPTS(tw_dev);
56851 +
56852 + free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
56853 +
56854 + /* Tell the card we are shutting down */
56855 + if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
56856 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x23, "Connection shutdown failed during suspend");
56857 + } else {
56858 + printk(KERN_WARNING "3w-sas: Suspend complete.\n");
56859 + }
56860 +
56861 + /* Clear doorbell interrupt */
56862 + TWL_CLEAR_DB_INTERRUPT(tw_dev);
56863 +
56864 + pci_save_state(pdev);
56865 + pci_disable_device(pdev);
56866 + pci_set_power_state(pdev, pci_choose_state(pdev, state));
56867 +
56868 + return 0;
56869 +} /* End twl_suspend() */
56870 +
56871 +/* This function is called on PCI resume */
56872 +static int twl_resume(struct pci_dev *pdev)
56873 +{
56874 + int retval = 0;
56875 + struct Scsi_Host *host = pci_get_drvdata(pdev);
56876 + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
56877 +
56878 + printk(KERN_WARNING "3w-sas: Resuming host %d.\n", tw_dev->host->host_no);
56879 + pci_set_power_state(pdev, PCI_D0);
56880 + pci_enable_wake(pdev, PCI_D0, 0);
56881 + pci_restore_state(pdev);
56882 +
56883 + retval = pci_enable_device(pdev);
56884 + if (retval) {
56885 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x24, "Enable device failed during resume");
56886 + return retval;
56887 + }
56888 +
56889 + pci_set_master(pdev);
56890 + pci_try_set_mwi(pdev);
56891 +
56892 + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
56893 + || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
56894 + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
56895 + || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
56896 + TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
56897 + retval = -ENODEV;
56898 + goto out_disable_device;
56899 + }
56900 +
56901 + /* Initialize the card */
56902 + if (twl_reset_sequence(tw_dev, 0)) {
56903 + retval = -ENODEV;
56904 + goto out_disable_device;
56905 + }
56906 +
56907 + /* Now setup the interrupt handler */
56908 + retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
56909 + if (retval) {
56910 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Error requesting IRQ during resume");
56911 + retval = -ENODEV;
56912 + goto out_disable_device;
56913 + }
56914 +
56915 + /* Now enable MSI if enabled */
56916 + if (test_bit(TW_USING_MSI, &tw_dev->flags))
56917 + pci_enable_msi(pdev);
56918 +
56919 + /* Re-enable interrupts on the card */
56920 + TWL_UNMASK_INTERRUPTS(tw_dev);
56921 +
56922 + printk(KERN_WARNING "3w-sas: Resume complete.\n");
56923 + return 0;
56924 +
56925 +out_disable_device:
56926 + scsi_remove_host(host);
56927 + pci_disable_device(pdev);
56928 +
56929 + return retval;
56930 +} /* End twl_resume() */
56931 +#endif
56932 +
56933 +/* PCI Devices supported by this driver */
56934 +static struct pci_device_id twl_pci_tbl[] __devinitdata = {
56935 + { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9750,
56936 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
56937 + { }
56938 +};
56939 +MODULE_DEVICE_TABLE(pci, twl_pci_tbl);
56940 +
56941 +/* pci_driver initializer */
56942 +static struct pci_driver twl_driver = {
56943 + .name = "3w-sas",
56944 + .id_table = twl_pci_tbl,
56945 + .probe = twl_probe,
56946 + .remove = twl_remove,
56947 +#ifdef CONFIG_PM
56948 + .suspend = twl_suspend,
56949 + .resume = twl_resume,
56950 +#endif
56951 + .shutdown = twl_shutdown
56952 +};
56953 +
56954 +/* This function is called on driver initialization */
56955 +static int __init twl_init(void)
56956 +{
56957 + printk(KERN_WARNING "LSI 3ware SAS/SATA-RAID Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
56958 +
56959 + return pci_register_driver(&twl_driver);
56960 +} /* End twl_init() */
56961 +
56962 +/* This function is called on driver exit */
56963 +static void __exit twl_exit(void)
56964 +{
56965 + pci_unregister_driver(&twl_driver);
56966 +} /* End twl_exit() */
56967 +
56968 +module_init(twl_init);
56969 +module_exit(twl_exit);
56970 +
56971 diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
56972 new file mode 100644
56973 index 0000000..e620505
56974 --- /dev/null
56975 +++ b/drivers/scsi/3w-sas.h
56976 @@ -0,0 +1,396 @@
56977 +/*
56978 + 3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
56979 +
56980 + Written By: Adam Radford <linuxraid@lsi.com>
56981 +
56982 + Copyright (C) 2009 LSI Corporation.
56983 +
56984 + This program is free software; you can redistribute it and/or modify
56985 + it under the terms of the GNU General Public License as published by
56986 + the Free Software Foundation; version 2 of the License.
56987 +
56988 + This program is distributed in the hope that it will be useful,
56989 + but WITHOUT ANY WARRANTY; without even the implied warranty of
56990 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
56991 + GNU General Public License for more details.
56992 +
56993 + NO WARRANTY
56994 + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
56995 + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
56996 + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
56997 + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
56998 + solely responsible for determining the appropriateness of using and
56999 + distributing the Program and assumes all risks associated with its
57000 + exercise of rights under this Agreement, including but not limited to
57001 + the risks and costs of program errors, damage to or loss of data,
57002 + programs or equipment, and unavailability or interruption of operations.
57003 +
57004 + DISCLAIMER OF LIABILITY
57005 + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
57006 + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57007 + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
57008 + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
57009 + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
57010 + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
57011 + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
57012 +
57013 + You should have received a copy of the GNU General Public License
57014 + along with this program; if not, write to the Free Software
57015 + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
57016 +
57017 + Bugs/Comments/Suggestions should be mailed to:
57018 + linuxraid@lsi.com
57019 +
57020 + For more information, goto:
57021 + http://www.lsi.com
57022 +*/
57023 +
57024 +#ifndef _3W_SAS_H
57025 +#define _3W_SAS_H
57026 +
57027 +/* AEN severity table */
57028 +static char *twl_aen_severity_table[] =
57029 +{
57030 + "None", "ERROR", "WARNING", "INFO", "DEBUG", (char*) 0
57031 +};
57032 +
57033 +/* Liberator register offsets */
57034 +#define TWL_STATUS 0x0 /* Status */
57035 +#define TWL_HIBDB 0x20 /* Inbound doorbell */
57036 +#define TWL_HISTAT 0x30 /* Host interrupt status */
57037 +#define TWL_HIMASK 0x34 /* Host interrupt mask */
57038 +#define TWL_HOBDB 0x9C /* Outbound doorbell */
57039 +#define TWL_HOBDBC 0xA0 /* Outbound doorbell clear */
57040 +#define TWL_SCRPD3 0xBC /* Scratchpad */
57041 +#define TWL_HIBQPL 0xC0 /* Host inbound Q low */
57042 +#define TWL_HIBQPH 0xC4 /* Host inbound Q high */
57043 +#define TWL_HOBQPL 0xC8 /* Host outbound Q low */
57044 +#define TWL_HOBQPH 0xCC /* Host outbound Q high */
57045 +#define TWL_HISTATUS_VALID_INTERRUPT 0xC
57046 +#define TWL_HISTATUS_ATTENTION_INTERRUPT 0x4
57047 +#define TWL_HISTATUS_RESPONSE_INTERRUPT 0x8
57048 +#define TWL_STATUS_OVERRUN_SUBMIT 0x2000
57049 +#define TWL_ISSUE_SOFT_RESET 0x100
57050 +#define TWL_CONTROLLER_READY 0x2000
57051 +#define TWL_DOORBELL_CONTROLLER_ERROR 0x200000
57052 +#define TWL_DOORBELL_ATTENTION_INTERRUPT 0x40000
57053 +#define TWL_PULL_MODE 0x1
57054 +
57055 +/* Command packet opcodes used by the driver */
57056 +#define TW_OP_INIT_CONNECTION 0x1
57057 +#define TW_OP_GET_PARAM 0x12
57058 +#define TW_OP_SET_PARAM 0x13
57059 +#define TW_OP_EXECUTE_SCSI 0x10
57060 +
57061 +/* Asynchronous Event Notification (AEN) codes used by the driver */
57062 +#define TW_AEN_QUEUE_EMPTY 0x0000
57063 +#define TW_AEN_SOFT_RESET 0x0001
57064 +#define TW_AEN_SYNC_TIME_WITH_HOST 0x031
57065 +#define TW_AEN_SEVERITY_ERROR 0x1
57066 +#define TW_AEN_SEVERITY_DEBUG 0x4
57067 +#define TW_AEN_NOT_RETRIEVED 0x1
57068 +
57069 +/* Command state defines */
57070 +#define TW_S_INITIAL 0x1 /* Initial state */
57071 +#define TW_S_STARTED 0x2 /* Id in use */
57072 +#define TW_S_POSTED 0x4 /* Posted to the controller */
57073 +#define TW_S_COMPLETED 0x8 /* Completed by isr */
57074 +#define TW_S_FINISHED 0x10 /* I/O completely done */
57075 +
57076 +/* Compatibility defines */
57077 +#define TW_9750_ARCH_ID 10
57078 +#define TW_CURRENT_DRIVER_SRL 40
57079 +#define TW_CURRENT_DRIVER_BUILD 0
57080 +#define TW_CURRENT_DRIVER_BRANCH 0
57081 +
57082 +/* Phase defines */
57083 +#define TW_PHASE_INITIAL 0
57084 +#define TW_PHASE_SGLIST 2
57085 +
57086 +/* Misc defines */
57087 +#define TW_SECTOR_SIZE 512
57088 +#define TW_MAX_UNITS 32
57089 +#define TW_INIT_MESSAGE_CREDITS 0x100
57090 +#define TW_INIT_COMMAND_PACKET_SIZE 0x3
57091 +#define TW_INIT_COMMAND_PACKET_SIZE_EXTENDED 0x6
57092 +#define TW_EXTENDED_INIT_CONNECT 0x2
57093 +#define TW_BASE_FW_SRL 24
57094 +#define TW_BASE_FW_BRANCH 0
57095 +#define TW_BASE_FW_BUILD 1
57096 +#define TW_Q_LENGTH 256
57097 +#define TW_Q_START 0
57098 +#define TW_MAX_SLOT 32
57099 +#define TW_MAX_RESET_TRIES 2
57100 +#define TW_MAX_CMDS_PER_LUN 254
57101 +#define TW_MAX_AEN_DRAIN 255
57102 +#define TW_IN_RESET 2
57103 +#define TW_USING_MSI 3
57104 +#define TW_IN_ATTENTION_LOOP 4
57105 +#define TW_MAX_SECTORS 256
57106 +#define TW_MAX_CDB_LEN 16
57107 +#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
57108 +#define TW_IOCTL_CHRDEV_FREE -1
57109 +#define TW_COMMAND_OFFSET 128 /* 128 bytes */
57110 +#define TW_VERSION_TABLE 0x0402
57111 +#define TW_TIMEKEEP_TABLE 0x040A
57112 +#define TW_INFORMATION_TABLE 0x0403
57113 +#define TW_PARAM_FWVER 3
57114 +#define TW_PARAM_FWVER_LENGTH 16
57115 +#define TW_PARAM_BIOSVER 4
57116 +#define TW_PARAM_BIOSVER_LENGTH 16
57117 +#define TW_PARAM_MODEL 8
57118 +#define TW_PARAM_MODEL_LENGTH 16
57119 +#define TW_PARAM_PHY_SUMMARY_TABLE 1
57120 +#define TW_PARAM_PHYCOUNT 2
57121 +#define TW_PARAM_PHYCOUNT_LENGTH 1
57122 +#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 // Used by smartmontools
57123 +#define TW_ALLOCATION_LENGTH 128
57124 +#define TW_SENSE_DATA_LENGTH 18
57125 +#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a
57126 +#define TW_ERROR_INVALID_FIELD_IN_CDB 0x10d
57127 +#define TW_ERROR_UNIT_OFFLINE 0x128
57128 +#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3
57129 +#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4
57130 +#define TW_DRIVER 6
57131 +#ifndef PCI_DEVICE_ID_3WARE_9750
57132 +#define PCI_DEVICE_ID_3WARE_9750 0x1010
57133 +#endif
57134 +
57135 +/* Bitmask macros to eliminate bitfields */
57136 +
57137 +/* opcode: 5, reserved: 3 */
57138 +#define TW_OPRES_IN(x,y) ((x << 5) | (y & 0x1f))
57139 +#define TW_OP_OUT(x) (x & 0x1f)
57140 +
57141 +/* opcode: 5, sgloffset: 3 */
57142 +#define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f))
57143 +#define TW_SGL_OUT(x) ((x >> 5) & 0x7)
57144 +
57145 +/* severity: 3, reserved: 5 */
57146 +#define TW_SEV_OUT(x) (x & 0x7)
57147 +
57148 +/* not_mfa: 1, reserved: 7, status: 8, request_id: 16 */
57149 +#define TW_RESID_OUT(x) ((x >> 16) & 0xffff)
57150 +#define TW_NOTMFA_OUT(x) (x & 0x1)
57151 +
57152 +/* request_id: 12, lun: 4 */
57153 +#define TW_REQ_LUN_IN(lun, request_id) (((lun << 12) & 0xf000) | (request_id & 0xfff))
57154 +#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf)
57155 +
57156 +/* Register access macros */
57157 +#define TWL_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_STATUS)
57158 +#define TWL_HOBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPL)
57159 +#define TWL_HOBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPH)
57160 +#define TWL_HOBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDB)
57161 +#define TWL_HOBDBC_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDBC)
57162 +#define TWL_HIMASK_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIMASK)
57163 +#define TWL_HISTAT_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HISTAT)
57164 +#define TWL_HIBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPH)
57165 +#define TWL_HIBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPL)
57166 +#define TWL_HIBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBDB)
57167 +#define TWL_SCRPD3_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_SCRPD3)
57168 +#define TWL_MASK_INTERRUPTS(x) (writel(~0, TWL_HIMASK_REG_ADDR(tw_dev)))
57169 +#define TWL_UNMASK_INTERRUPTS(x) (writel(~TWL_HISTATUS_VALID_INTERRUPT, TWL_HIMASK_REG_ADDR(tw_dev)))
57170 +#define TWL_CLEAR_DB_INTERRUPT(x) (writel(~0, TWL_HOBDBC_REG_ADDR(tw_dev)))
57171 +#define TWL_SOFT_RESET(x) (writel(TWL_ISSUE_SOFT_RESET, TWL_HIBDB_REG_ADDR(tw_dev)))
57172 +
57173 +/* Macros */
57174 +#define TW_PRINTK(h,a,b,c) { \
57175 +if (h) \
57176 +printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \
57177 +else \
57178 +printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \
57179 +}
57180 +#define TW_MAX_LUNS 16
57181 +#define TW_COMMAND_SIZE (sizeof(dma_addr_t) > 4 ? 6 : 4)
57182 +#define TW_LIBERATOR_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 46 : 92)
57183 +#define TW_LIBERATOR_MAX_SGL_LENGTH_OLD (sizeof(dma_addr_t) > 4 ? 47 : 94)
57184 +#define TW_PADDING_LENGTH_LIBERATOR 136
57185 +#define TW_PADDING_LENGTH_LIBERATOR_OLD 132
57186 +#define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? cpu_to_le64(x) : cpu_to_le32(x))
57187 +
57188 +#pragma pack(1)
57189 +
57190 +/* SGL entry */
57191 +typedef struct TAG_TW_SG_Entry_ISO {
57192 + dma_addr_t address;
57193 + dma_addr_t length;
57194 +} TW_SG_Entry_ISO;
57195 +
57196 +/* Old Command Packet with ISO SGL */
57197 +typedef struct TW_Command {
57198 + unsigned char opcode__sgloffset;
57199 + unsigned char size;
57200 + unsigned char request_id;
57201 + unsigned char unit__hostid;
57202 + /* Second DWORD */
57203 + unsigned char status;
57204 + unsigned char flags;
57205 + union {
57206 + unsigned short block_count;
57207 + unsigned short parameter_count;
57208 + } byte6_offset;
57209 + union {
57210 + struct {
57211 + u32 lba;
57212 + TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
57213 + unsigned char padding[TW_PADDING_LENGTH_LIBERATOR_OLD];
57214 + } io;
57215 + struct {
57216 + TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
57217 + u32 padding;
57218 + unsigned char padding2[TW_PADDING_LENGTH_LIBERATOR_OLD];
57219 + } param;
57220 + } byte8_offset;
57221 +} TW_Command;
57222 +
57223 +/* New Command Packet with ISO SGL */
57224 +typedef struct TAG_TW_Command_Apache {
57225 + unsigned char opcode__reserved;
57226 + unsigned char unit;
57227 + unsigned short request_id__lunl;
57228 + unsigned char status;
57229 + unsigned char sgl_offset;
57230 + unsigned short sgl_entries__lunh;
57231 + unsigned char cdb[16];
57232 + TW_SG_Entry_ISO sg_list[TW_LIBERATOR_MAX_SGL_LENGTH];
57233 + unsigned char padding[TW_PADDING_LENGTH_LIBERATOR];
57234 +} TW_Command_Apache;
57235 +
57236 +/* New command packet header */
57237 +typedef struct TAG_TW_Command_Apache_Header {
57238 + unsigned char sense_data[TW_SENSE_DATA_LENGTH];
57239 + struct {
57240 + char reserved[4];
57241 + unsigned short error;
57242 + unsigned char padding;
57243 + unsigned char severity__reserved;
57244 + } status_block;
57245 + unsigned char err_specific_desc[98];
57246 + struct {
57247 + unsigned char size_header;
57248 + unsigned short request_id;
57249 + unsigned char size_sense;
57250 + } header_desc;
57251 +} TW_Command_Apache_Header;
57252 +
57253 +/* This struct is a union of the 2 command packets */
57254 +typedef struct TAG_TW_Command_Full {
57255 + TW_Command_Apache_Header header;
57256 + union {
57257 + TW_Command oldcommand;
57258 + TW_Command_Apache newcommand;
57259 + } command;
57260 +} TW_Command_Full;
57261 +
57262 +/* Initconnection structure */
57263 +typedef struct TAG_TW_Initconnect {
57264 + unsigned char opcode__reserved;
57265 + unsigned char size;
57266 + unsigned char request_id;
57267 + unsigned char res2;
57268 + unsigned char status;
57269 + unsigned char flags;
57270 + unsigned short message_credits;
57271 + u32 features;
57272 + unsigned short fw_srl;
57273 + unsigned short fw_arch_id;
57274 + unsigned short fw_branch;
57275 + unsigned short fw_build;
57276 + u32 result;
57277 +} TW_Initconnect;
57278 +
57279 +/* Event info structure */
57280 +typedef struct TAG_TW_Event
57281 +{
57282 + unsigned int sequence_id;
57283 + unsigned int time_stamp_sec;
57284 + unsigned short aen_code;
57285 + unsigned char severity;
57286 + unsigned char retrieved;
57287 + unsigned char repeat_count;
57288 + unsigned char parameter_len;
57289 + unsigned char parameter_data[98];
57290 +} TW_Event;
57291 +
57292 +typedef struct TAG_TW_Ioctl_Driver_Command {
57293 + unsigned int control_code;
57294 + unsigned int status;
57295 + unsigned int unique_id;
57296 + unsigned int sequence_id;
57297 + unsigned int os_specific;
57298 + unsigned int buffer_length;
57299 +} TW_Ioctl_Driver_Command;
57300 +
57301 +typedef struct TAG_TW_Ioctl_Apache {
57302 + TW_Ioctl_Driver_Command driver_command;
57303 + char padding[488];
57304 + TW_Command_Full firmware_command;
57305 + char data_buffer[1];
57306 +} TW_Ioctl_Buf_Apache;
57307 +
57308 +/* GetParam descriptor */
57309 +typedef struct {
57310 + unsigned short table_id;
57311 + unsigned short parameter_id;
57312 + unsigned short parameter_size_bytes;
57313 + unsigned short actual_parameter_size_bytes;
57314 + unsigned char data[1];
57315 +} TW_Param_Apache;
57316 +
57317 +/* Compatibility information structure */
57318 +typedef struct TAG_TW_Compatibility_Info
57319 +{
57320 + char driver_version[32];
57321 + unsigned short working_srl;
57322 + unsigned short working_branch;
57323 + unsigned short working_build;
57324 + unsigned short driver_srl_high;
57325 + unsigned short driver_branch_high;
57326 + unsigned short driver_build_high;
57327 + unsigned short driver_srl_low;
57328 + unsigned short driver_branch_low;
57329 + unsigned short driver_build_low;
57330 + unsigned short fw_on_ctlr_srl;
57331 + unsigned short fw_on_ctlr_branch;
57332 + unsigned short fw_on_ctlr_build;
57333 +} TW_Compatibility_Info;
57334 +
57335 +#pragma pack()
57336 +
57337 +typedef struct TAG_TW_Device_Extension {
57338 + void __iomem *base_addr;
57339 + unsigned long *generic_buffer_virt[TW_Q_LENGTH];
57340 + dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
57341 + TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
57342 + dma_addr_t command_packet_phys[TW_Q_LENGTH];
57343 + TW_Command_Apache_Header *sense_buffer_virt[TW_Q_LENGTH];
57344 + dma_addr_t sense_buffer_phys[TW_Q_LENGTH];
57345 + struct pci_dev *tw_pci_dev;
57346 + struct scsi_cmnd *srb[TW_Q_LENGTH];
57347 + unsigned char free_queue[TW_Q_LENGTH];
57348 + unsigned char free_head;
57349 + unsigned char free_tail;
57350 + int state[TW_Q_LENGTH];
57351 + unsigned int posted_request_count;
57352 + unsigned int max_posted_request_count;
57353 + unsigned int max_sgl_entries;
57354 + unsigned int sgl_entries;
57355 + unsigned int num_resets;
57356 + unsigned int sector_count;
57357 + unsigned int max_sector_count;
57358 + unsigned int aen_count;
57359 + struct Scsi_Host *host;
57360 + long flags;
57361 + TW_Event *event_queue[TW_Q_LENGTH];
57362 + unsigned char error_index;
57363 + unsigned int error_sequence_id;
57364 + int chrdev_request_id;
57365 + wait_queue_head_t ioctl_wqueue;
57366 + struct mutex ioctl_lock;
57367 + TW_Compatibility_Info tw_compat_info;
57368 + char online;
57369 +} TW_Device_Extension;
57370 +
57371 +#endif /* _3W_SAS_H */
57372 +
57373 diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
57374 index 1ddcf40..a85f062 100644
57375 --- a/drivers/scsi/BusLogic.c
57376 +++ b/drivers/scsi/BusLogic.c
57377 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
57378 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
57379 *PrototypeHostAdapter)
57380 {
57381 + pax_track_stack();
57382 +
57383 /*
57384 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
57385 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
57386 diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
57387 index e11cca4..4295679 100644
57388 --- a/drivers/scsi/Kconfig
57389 +++ b/drivers/scsi/Kconfig
57390 @@ -399,6 +399,17 @@ config SCSI_3W_9XXX
57391 Please read the comments at the top of
57392 <file:drivers/scsi/3w-9xxx.c>.
57393
57394 +config SCSI_3W_SAS
57395 + tristate "3ware 97xx SAS/SATA-RAID support"
57396 + depends on PCI && SCSI
57397 + help
57398 + This driver supports the LSI 3ware 9750 6Gb/s SAS/SATA-RAID cards.
57399 +
57400 + <http://www.lsi.com>
57401 +
57402 + Please read the comments at the top of
57403 + <file:drivers/scsi/3w-sas.c>.
57404 +
57405 config SCSI_7000FASST
57406 tristate "7000FASST SCSI support"
57407 depends on ISA && SCSI && ISA_DMA_API
57408 @@ -621,6 +632,14 @@ config SCSI_FLASHPOINT
57409 substantial, so users of MultiMaster Host Adapters may not
57410 wish to include it.
57411
57412 +config VMWARE_PVSCSI
57413 + tristate "VMware PVSCSI driver support"
57414 + depends on PCI && SCSI && X86
57415 + help
57416 + This driver supports VMware's para virtualized SCSI HBA.
57417 + To compile this driver as a module, choose M here: the
57418 + module will be called vmw_pvscsi.
57419 +
57420 config LIBFC
57421 tristate "LibFC module"
57422 select SCSI_FC_ATTRS
57423 diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
57424 index 3ad61db..c938975 100644
57425 --- a/drivers/scsi/Makefile
57426 +++ b/drivers/scsi/Makefile
57427 @@ -113,6 +113,7 @@ obj-$(CONFIG_SCSI_MESH) += mesh.o
57428 obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o
57429 obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
57430 obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
57431 +obj-$(CONFIG_SCSI_3W_SAS) += 3w-sas.o
57432 obj-$(CONFIG_SCSI_PPA) += ppa.o
57433 obj-$(CONFIG_SCSI_IMM) += imm.o
57434 obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o
57435 @@ -133,6 +134,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
57436 obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
57437 obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
57438 obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
57439 +obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o
57440
57441 obj-$(CONFIG_ARM) += arm/
57442
57443 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
57444 index cdbdec9..b7d560b 100644
57445 --- a/drivers/scsi/aacraid/aacraid.h
57446 +++ b/drivers/scsi/aacraid/aacraid.h
57447 @@ -471,7 +471,7 @@ struct adapter_ops
57448 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
57449 /* Administrative operations */
57450 int (*adapter_comm)(struct aac_dev * dev, int comm);
57451 -};
57452 +} __no_const;
57453
57454 /*
57455 * Define which interrupt handler needs to be installed
57456 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
57457 index a5b8e7b..a6a0e43 100644
57458 --- a/drivers/scsi/aacraid/commctrl.c
57459 +++ b/drivers/scsi/aacraid/commctrl.c
57460 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
57461 u32 actual_fibsize64, actual_fibsize = 0;
57462 int i;
57463
57464 + pax_track_stack();
57465
57466 if (dev->in_reset) {
57467 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
57468 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
57469 index 9b97c3e..f099725 100644
57470 --- a/drivers/scsi/aacraid/linit.c
57471 +++ b/drivers/scsi/aacraid/linit.c
57472 @@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
57473 #elif defined(__devinitconst)
57474 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
57475 #else
57476 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
57477 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
57478 #endif
57479 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
57480 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
57481 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
57482 index 996f722..9127845 100644
57483 --- a/drivers/scsi/aic94xx/aic94xx_init.c
57484 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
57485 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
57486 flash_error_table[i].reason);
57487 }
57488
57489 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
57490 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
57491 asd_show_update_bios, asd_store_update_bios);
57492
57493 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
57494 @@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
57495 .lldd_control_phy = asd_control_phy,
57496 };
57497
57498 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
57499 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
57500 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
57501 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
57502 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
57503 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
57504 index 58efd4b..cb48dc7 100644
57505 --- a/drivers/scsi/bfa/bfa_ioc.h
57506 +++ b/drivers/scsi/bfa/bfa_ioc.h
57507 @@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
57508 bfa_ioc_disable_cbfn_t disable_cbfn;
57509 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
57510 bfa_ioc_reset_cbfn_t reset_cbfn;
57511 -};
57512 +} __no_const;
57513
57514 /**
57515 * Heartbeat failure notification queue element.
57516 diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
57517 index 7ad177e..5503586 100644
57518 --- a/drivers/scsi/bfa/bfa_iocfc.h
57519 +++ b/drivers/scsi/bfa/bfa_iocfc.h
57520 @@ -61,7 +61,7 @@ struct bfa_hwif_s {
57521 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
57522 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
57523 u32 *nvecs, u32 *maxvec);
57524 -};
57525 +} __no_const;
57526 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
57527
57528 struct bfa_iocfc_s {
57529 diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
57530 index 4967643..cbec06b 100644
57531 --- a/drivers/scsi/dpt_i2o.c
57532 +++ b/drivers/scsi/dpt_i2o.c
57533 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
57534 dma_addr_t addr;
57535 ulong flags = 0;
57536
57537 + pax_track_stack();
57538 +
57539 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
57540 // get user msg size in u32s
57541 if(get_user(size, &user_msg[0])){
57542 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
57543 s32 rcode;
57544 dma_addr_t addr;
57545
57546 + pax_track_stack();
57547 +
57548 memset(msg, 0 , sizeof(msg));
57549 len = scsi_bufflen(cmd);
57550 direction = 0x00000000;
57551 diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
57552 index c7076ce..e20c67c 100644
57553 --- a/drivers/scsi/eata.c
57554 +++ b/drivers/scsi/eata.c
57555 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
57556 struct hostdata *ha;
57557 char name[16];
57558
57559 + pax_track_stack();
57560 +
57561 sprintf(name, "%s%d", driver_name, j);
57562
57563 if (!request_region(port_base, REGION_SIZE, driver_name)) {
57564 diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
57565 index 11ae5c9..891daec 100644
57566 --- a/drivers/scsi/fcoe/libfcoe.c
57567 +++ b/drivers/scsi/fcoe/libfcoe.c
57568 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
57569 size_t rlen;
57570 size_t dlen;
57571
57572 + pax_track_stack();
57573 +
57574 fiph = (struct fip_header *)skb->data;
57575 sub = fiph->fip_subcode;
57576 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
57577 diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
57578 index 71c7bbe..e93088a 100644
57579 --- a/drivers/scsi/fnic/fnic_main.c
57580 +++ b/drivers/scsi/fnic/fnic_main.c
57581 @@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
57582 /* Start local port initiatialization */
57583
57584 lp->link_up = 0;
57585 - lp->tt = fnic_transport_template;
57586 + memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
57587
57588 lp->max_retry_count = fnic->config.flogi_retries;
57589 lp->max_rport_retry_count = fnic->config.plogi_retries;
57590 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
57591 index bb96d74..9ec3ce4 100644
57592 --- a/drivers/scsi/gdth.c
57593 +++ b/drivers/scsi/gdth.c
57594 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
57595 ulong flags;
57596 gdth_ha_str *ha;
57597
57598 + pax_track_stack();
57599 +
57600 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
57601 return -EFAULT;
57602 ha = gdth_find_ha(ldrv.ionode);
57603 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
57604 gdth_ha_str *ha;
57605 int rval;
57606
57607 + pax_track_stack();
57608 +
57609 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
57610 res.number >= MAX_HDRIVES)
57611 return -EFAULT;
57612 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
57613 gdth_ha_str *ha;
57614 int rval;
57615
57616 + pax_track_stack();
57617 +
57618 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
57619 return -EFAULT;
57620 ha = gdth_find_ha(gen.ionode);
57621 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
57622 int i;
57623 gdth_cmd_str gdtcmd;
57624 char cmnd[MAX_COMMAND_SIZE];
57625 +
57626 + pax_track_stack();
57627 +
57628 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
57629
57630 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
57631 diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
57632 index 1258da3..20d8ae6 100644
57633 --- a/drivers/scsi/gdth_proc.c
57634 +++ b/drivers/scsi/gdth_proc.c
57635 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
57636 ulong64 paddr;
57637
57638 char cmnd[MAX_COMMAND_SIZE];
57639 +
57640 + pax_track_stack();
57641 +
57642 memset(cmnd, 0xff, 12);
57643 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
57644
57645 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
57646 gdth_hget_str *phg;
57647 char cmnd[MAX_COMMAND_SIZE];
57648
57649 + pax_track_stack();
57650 +
57651 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
57652 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
57653 if (!gdtcmd || !estr)
57654 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
57655 index d03a926..f324286 100644
57656 --- a/drivers/scsi/hosts.c
57657 +++ b/drivers/scsi/hosts.c
57658 @@ -40,7 +40,7 @@
57659 #include "scsi_logging.h"
57660
57661
57662 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
57663 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
57664
57665
57666 static void scsi_host_cls_release(struct device *dev)
57667 @@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
57668 * subtract one because we increment first then return, but we need to
57669 * know what the next host number was before increment
57670 */
57671 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
57672 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
57673 shost->dma_channel = 0xff;
57674
57675 /* These three are default values which can be overridden */
57676 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
57677 index a601159..55e19d2 100644
57678 --- a/drivers/scsi/ipr.c
57679 +++ b/drivers/scsi/ipr.c
57680 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
57681 return true;
57682 }
57683
57684 -static struct ata_port_operations ipr_sata_ops = {
57685 +static const struct ata_port_operations ipr_sata_ops = {
57686 .phy_reset = ipr_ata_phy_reset,
57687 .hardreset = ipr_sata_reset,
57688 .post_internal_cmd = ipr_ata_post_internal,
57689 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
57690 index 4e49fbc..97907ff 100644
57691 --- a/drivers/scsi/ips.h
57692 +++ b/drivers/scsi/ips.h
57693 @@ -1027,7 +1027,7 @@ typedef struct {
57694 int (*intr)(struct ips_ha *);
57695 void (*enableint)(struct ips_ha *);
57696 uint32_t (*statupd)(struct ips_ha *);
57697 -} ips_hw_func_t;
57698 +} __no_const ips_hw_func_t;
57699
57700 typedef struct ips_ha {
57701 uint8_t ha_id[IPS_MAX_CHANNELS+1];
57702 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
57703 index c1c1574..a9c9348 100644
57704 --- a/drivers/scsi/libfc/fc_exch.c
57705 +++ b/drivers/scsi/libfc/fc_exch.c
57706 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
57707 * all together if not used XXX
57708 */
57709 struct {
57710 - atomic_t no_free_exch;
57711 - atomic_t no_free_exch_xid;
57712 - atomic_t xid_not_found;
57713 - atomic_t xid_busy;
57714 - atomic_t seq_not_found;
57715 - atomic_t non_bls_resp;
57716 + atomic_unchecked_t no_free_exch;
57717 + atomic_unchecked_t no_free_exch_xid;
57718 + atomic_unchecked_t xid_not_found;
57719 + atomic_unchecked_t xid_busy;
57720 + atomic_unchecked_t seq_not_found;
57721 + atomic_unchecked_t non_bls_resp;
57722 } stats;
57723 };
57724 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
57725 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
57726 /* allocate memory for exchange */
57727 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
57728 if (!ep) {
57729 - atomic_inc(&mp->stats.no_free_exch);
57730 + atomic_inc_unchecked(&mp->stats.no_free_exch);
57731 goto out;
57732 }
57733 memset(ep, 0, sizeof(*ep));
57734 @@ -557,7 +557,7 @@ out:
57735 return ep;
57736 err:
57737 spin_unlock_bh(&pool->lock);
57738 - atomic_inc(&mp->stats.no_free_exch_xid);
57739 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
57740 mempool_free(ep, mp->ep_pool);
57741 return NULL;
57742 }
57743 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57744 xid = ntohs(fh->fh_ox_id); /* we originated exch */
57745 ep = fc_exch_find(mp, xid);
57746 if (!ep) {
57747 - atomic_inc(&mp->stats.xid_not_found);
57748 + atomic_inc_unchecked(&mp->stats.xid_not_found);
57749 reject = FC_RJT_OX_ID;
57750 goto out;
57751 }
57752 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57753 ep = fc_exch_find(mp, xid);
57754 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
57755 if (ep) {
57756 - atomic_inc(&mp->stats.xid_busy);
57757 + atomic_inc_unchecked(&mp->stats.xid_busy);
57758 reject = FC_RJT_RX_ID;
57759 goto rel;
57760 }
57761 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57762 }
57763 xid = ep->xid; /* get our XID */
57764 } else if (!ep) {
57765 - atomic_inc(&mp->stats.xid_not_found);
57766 + atomic_inc_unchecked(&mp->stats.xid_not_found);
57767 reject = FC_RJT_RX_ID; /* XID not found */
57768 goto out;
57769 }
57770 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57771 } else {
57772 sp = &ep->seq;
57773 if (sp->id != fh->fh_seq_id) {
57774 - atomic_inc(&mp->stats.seq_not_found);
57775 + atomic_inc_unchecked(&mp->stats.seq_not_found);
57776 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
57777 goto rel;
57778 }
57779 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57780
57781 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
57782 if (!ep) {
57783 - atomic_inc(&mp->stats.xid_not_found);
57784 + atomic_inc_unchecked(&mp->stats.xid_not_found);
57785 goto out;
57786 }
57787 if (ep->esb_stat & ESB_ST_COMPLETE) {
57788 - atomic_inc(&mp->stats.xid_not_found);
57789 + atomic_inc_unchecked(&mp->stats.xid_not_found);
57790 goto out;
57791 }
57792 if (ep->rxid == FC_XID_UNKNOWN)
57793 ep->rxid = ntohs(fh->fh_rx_id);
57794 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
57795 - atomic_inc(&mp->stats.xid_not_found);
57796 + atomic_inc_unchecked(&mp->stats.xid_not_found);
57797 goto rel;
57798 }
57799 if (ep->did != ntoh24(fh->fh_s_id) &&
57800 ep->did != FC_FID_FLOGI) {
57801 - atomic_inc(&mp->stats.xid_not_found);
57802 + atomic_inc_unchecked(&mp->stats.xid_not_found);
57803 goto rel;
57804 }
57805 sof = fr_sof(fp);
57806 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57807 } else {
57808 sp = &ep->seq;
57809 if (sp->id != fh->fh_seq_id) {
57810 - atomic_inc(&mp->stats.seq_not_found);
57811 + atomic_inc_unchecked(&mp->stats.seq_not_found);
57812 goto rel;
57813 }
57814 }
57815 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57816 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
57817
57818 if (!sp)
57819 - atomic_inc(&mp->stats.xid_not_found);
57820 + atomic_inc_unchecked(&mp->stats.xid_not_found);
57821 else
57822 - atomic_inc(&mp->stats.non_bls_resp);
57823 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
57824
57825 fc_frame_free(fp);
57826 }
57827 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
57828 index 0ee989f..a582241 100644
57829 --- a/drivers/scsi/libsas/sas_ata.c
57830 +++ b/drivers/scsi/libsas/sas_ata.c
57831 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
57832 }
57833 }
57834
57835 -static struct ata_port_operations sas_sata_ops = {
57836 +static const struct ata_port_operations sas_sata_ops = {
57837 .phy_reset = sas_ata_phy_reset,
57838 .post_internal_cmd = sas_ata_post_internal,
57839 .qc_defer = ata_std_qc_defer,
57840 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
57841 index aa10f79..5cc79e4 100644
57842 --- a/drivers/scsi/lpfc/lpfc.h
57843 +++ b/drivers/scsi/lpfc/lpfc.h
57844 @@ -400,7 +400,7 @@ struct lpfc_vport {
57845 struct dentry *debug_nodelist;
57846 struct dentry *vport_debugfs_root;
57847 struct lpfc_debugfs_trc *disc_trc;
57848 - atomic_t disc_trc_cnt;
57849 + atomic_unchecked_t disc_trc_cnt;
57850 #endif
57851 uint8_t stat_data_enabled;
57852 uint8_t stat_data_blocked;
57853 @@ -725,8 +725,8 @@ struct lpfc_hba {
57854 struct timer_list fabric_block_timer;
57855 unsigned long bit_flags;
57856 #define FABRIC_COMANDS_BLOCKED 0
57857 - atomic_t num_rsrc_err;
57858 - atomic_t num_cmd_success;
57859 + atomic_unchecked_t num_rsrc_err;
57860 + atomic_unchecked_t num_cmd_success;
57861 unsigned long last_rsrc_error_time;
57862 unsigned long last_ramp_down_time;
57863 unsigned long last_ramp_up_time;
57864 @@ -740,7 +740,7 @@ struct lpfc_hba {
57865 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
57866 struct dentry *debug_slow_ring_trc;
57867 struct lpfc_debugfs_trc *slow_ring_trc;
57868 - atomic_t slow_ring_trc_cnt;
57869 + atomic_unchecked_t slow_ring_trc_cnt;
57870 #endif
57871
57872 /* Used for deferred freeing of ELS data buffers */
57873 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
57874 index 8d0f0de..7c77a62 100644
57875 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
57876 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
57877 @@ -124,7 +124,7 @@ struct lpfc_debug {
57878 int len;
57879 };
57880
57881 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
57882 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
57883 static unsigned long lpfc_debugfs_start_time = 0L;
57884
57885 /**
57886 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
57887 lpfc_debugfs_enable = 0;
57888
57889 len = 0;
57890 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
57891 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
57892 (lpfc_debugfs_max_disc_trc - 1);
57893 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
57894 dtp = vport->disc_trc + i;
57895 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
57896 lpfc_debugfs_enable = 0;
57897
57898 len = 0;
57899 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
57900 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
57901 (lpfc_debugfs_max_slow_ring_trc - 1);
57902 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
57903 dtp = phba->slow_ring_trc + i;
57904 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
57905 uint32_t *ptr;
57906 char buffer[1024];
57907
57908 + pax_track_stack();
57909 +
57910 off = 0;
57911 spin_lock_irq(&phba->hbalock);
57912
57913 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
57914 !vport || !vport->disc_trc)
57915 return;
57916
57917 - index = atomic_inc_return(&vport->disc_trc_cnt) &
57918 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
57919 (lpfc_debugfs_max_disc_trc - 1);
57920 dtp = vport->disc_trc + index;
57921 dtp->fmt = fmt;
57922 dtp->data1 = data1;
57923 dtp->data2 = data2;
57924 dtp->data3 = data3;
57925 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
57926 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
57927 dtp->jif = jiffies;
57928 #endif
57929 return;
57930 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
57931 !phba || !phba->slow_ring_trc)
57932 return;
57933
57934 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
57935 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
57936 (lpfc_debugfs_max_slow_ring_trc - 1);
57937 dtp = phba->slow_ring_trc + index;
57938 dtp->fmt = fmt;
57939 dtp->data1 = data1;
57940 dtp->data2 = data2;
57941 dtp->data3 = data3;
57942 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
57943 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
57944 dtp->jif = jiffies;
57945 #endif
57946 return;
57947 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
57948 "slow_ring buffer\n");
57949 goto debug_failed;
57950 }
57951 - atomic_set(&phba->slow_ring_trc_cnt, 0);
57952 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
57953 memset(phba->slow_ring_trc, 0,
57954 (sizeof(struct lpfc_debugfs_trc) *
57955 lpfc_debugfs_max_slow_ring_trc));
57956 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
57957 "buffer\n");
57958 goto debug_failed;
57959 }
57960 - atomic_set(&vport->disc_trc_cnt, 0);
57961 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
57962
57963 snprintf(name, sizeof(name), "discovery_trace");
57964 vport->debug_disc_trc =
57965 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
57966 index 549bc7d..8189dbb 100644
57967 --- a/drivers/scsi/lpfc/lpfc_init.c
57968 +++ b/drivers/scsi/lpfc/lpfc_init.c
57969 @@ -8021,8 +8021,10 @@ lpfc_init(void)
57970 printk(LPFC_COPYRIGHT "\n");
57971
57972 if (lpfc_enable_npiv) {
57973 - lpfc_transport_functions.vport_create = lpfc_vport_create;
57974 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
57975 + pax_open_kernel();
57976 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
57977 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
57978 + pax_close_kernel();
57979 }
57980 lpfc_transport_template =
57981 fc_attach_transport(&lpfc_transport_functions);
57982 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
57983 index c88f59f..ff2a42f 100644
57984 --- a/drivers/scsi/lpfc/lpfc_scsi.c
57985 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
57986 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
57987 uint32_t evt_posted;
57988
57989 spin_lock_irqsave(&phba->hbalock, flags);
57990 - atomic_inc(&phba->num_rsrc_err);
57991 + atomic_inc_unchecked(&phba->num_rsrc_err);
57992 phba->last_rsrc_error_time = jiffies;
57993
57994 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
57995 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
57996 unsigned long flags;
57997 struct lpfc_hba *phba = vport->phba;
57998 uint32_t evt_posted;
57999 - atomic_inc(&phba->num_cmd_success);
58000 + atomic_inc_unchecked(&phba->num_cmd_success);
58001
58002 if (vport->cfg_lun_queue_depth <= queue_depth)
58003 return;
58004 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
58005 int i;
58006 struct lpfc_rport_data *rdata;
58007
58008 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
58009 - num_cmd_success = atomic_read(&phba->num_cmd_success);
58010 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
58011 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
58012
58013 vports = lpfc_create_vport_work_array(phba);
58014 if (vports != NULL)
58015 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
58016 }
58017 }
58018 lpfc_destroy_vport_work_array(phba, vports);
58019 - atomic_set(&phba->num_rsrc_err, 0);
58020 - atomic_set(&phba->num_cmd_success, 0);
58021 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
58022 + atomic_set_unchecked(&phba->num_cmd_success, 0);
58023 }
58024
58025 /**
58026 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
58027 }
58028 }
58029 lpfc_destroy_vport_work_array(phba, vports);
58030 - atomic_set(&phba->num_rsrc_err, 0);
58031 - atomic_set(&phba->num_cmd_success, 0);
58032 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
58033 + atomic_set_unchecked(&phba->num_cmd_success, 0);
58034 }
58035
58036 /**
58037 diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
58038 index 234f0b7..3020aea 100644
58039 --- a/drivers/scsi/megaraid/megaraid_mbox.c
58040 +++ b/drivers/scsi/megaraid/megaraid_mbox.c
58041 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
58042 int rval;
58043 int i;
58044
58045 + pax_track_stack();
58046 +
58047 // Allocate memory for the base list of scb for management module.
58048 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
58049
58050 diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
58051 index 7a117c1..ee01e9e 100644
58052 --- a/drivers/scsi/osd/osd_initiator.c
58053 +++ b/drivers/scsi/osd/osd_initiator.c
58054 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
58055 int nelem = ARRAY_SIZE(get_attrs), a = 0;
58056 int ret;
58057
58058 + pax_track_stack();
58059 +
58060 or = osd_start_request(od, GFP_KERNEL);
58061 if (!or)
58062 return -ENOMEM;
58063 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
58064 index 9ab8c86..9425ad3 100644
58065 --- a/drivers/scsi/pmcraid.c
58066 +++ b/drivers/scsi/pmcraid.c
58067 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
58068 res->scsi_dev = scsi_dev;
58069 scsi_dev->hostdata = res;
58070 res->change_detected = 0;
58071 - atomic_set(&res->read_failures, 0);
58072 - atomic_set(&res->write_failures, 0);
58073 + atomic_set_unchecked(&res->read_failures, 0);
58074 + atomic_set_unchecked(&res->write_failures, 0);
58075 rc = 0;
58076 }
58077 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
58078 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
58079
58080 /* If this was a SCSI read/write command keep count of errors */
58081 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
58082 - atomic_inc(&res->read_failures);
58083 + atomic_inc_unchecked(&res->read_failures);
58084 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
58085 - atomic_inc(&res->write_failures);
58086 + atomic_inc_unchecked(&res->write_failures);
58087
58088 if (!RES_IS_GSCSI(res->cfg_entry) &&
58089 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
58090 @@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
58091
58092 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
58093 /* add resources only after host is added into system */
58094 - if (!atomic_read(&pinstance->expose_resources))
58095 + if (!atomic_read_unchecked(&pinstance->expose_resources))
58096 return;
58097
58098 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
58099 @@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
58100 init_waitqueue_head(&pinstance->reset_wait_q);
58101
58102 atomic_set(&pinstance->outstanding_cmds, 0);
58103 - atomic_set(&pinstance->expose_resources, 0);
58104 + atomic_set_unchecked(&pinstance->expose_resources, 0);
58105
58106 INIT_LIST_HEAD(&pinstance->free_res_q);
58107 INIT_LIST_HEAD(&pinstance->used_res_q);
58108 @@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
58109 /* Schedule worker thread to handle CCN and take care of adding and
58110 * removing devices to OS
58111 */
58112 - atomic_set(&pinstance->expose_resources, 1);
58113 + atomic_set_unchecked(&pinstance->expose_resources, 1);
58114 schedule_work(&pinstance->worker_q);
58115 return rc;
58116
58117 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
58118 index 3441b3f..6cbe8f7 100644
58119 --- a/drivers/scsi/pmcraid.h
58120 +++ b/drivers/scsi/pmcraid.h
58121 @@ -690,7 +690,7 @@ struct pmcraid_instance {
58122 atomic_t outstanding_cmds;
58123
58124 /* should add/delete resources to mid-layer now ?*/
58125 - atomic_t expose_resources;
58126 + atomic_unchecked_t expose_resources;
58127
58128 /* Tasklet to handle deferred processing */
58129 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
58130 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
58131 struct list_head queue; /* link to "to be exposed" resources */
58132 struct pmcraid_config_table_entry cfg_entry;
58133 struct scsi_device *scsi_dev; /* Link scsi_device structure */
58134 - atomic_t read_failures; /* count of failed READ commands */
58135 - atomic_t write_failures; /* count of failed WRITE commands */
58136 + atomic_unchecked_t read_failures; /* count of failed READ commands */
58137 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
58138
58139 /* To indicate add/delete/modify during CCN */
58140 u8 change_detected;
58141 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
58142 index 2150618..7034215 100644
58143 --- a/drivers/scsi/qla2xxx/qla_def.h
58144 +++ b/drivers/scsi/qla2xxx/qla_def.h
58145 @@ -2089,7 +2089,7 @@ struct isp_operations {
58146
58147 int (*get_flash_version) (struct scsi_qla_host *, void *);
58148 int (*start_scsi) (srb_t *);
58149 -};
58150 +} __no_const;
58151
58152 /* MSI-X Support *************************************************************/
58153
58154 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
58155 index 81b5f29..2ae1fad 100644
58156 --- a/drivers/scsi/qla4xxx/ql4_def.h
58157 +++ b/drivers/scsi/qla4xxx/ql4_def.h
58158 @@ -240,7 +240,7 @@ struct ddb_entry {
58159 atomic_t retry_relogin_timer; /* Min Time between relogins
58160 * (4000 only) */
58161 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
58162 - atomic_t relogin_retry_count; /* Num of times relogin has been
58163 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
58164 * retried */
58165
58166 uint16_t port;
58167 diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
58168 index af8c323..515dd51 100644
58169 --- a/drivers/scsi/qla4xxx/ql4_init.c
58170 +++ b/drivers/scsi/qla4xxx/ql4_init.c
58171 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
58172 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
58173 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
58174 atomic_set(&ddb_entry->relogin_timer, 0);
58175 - atomic_set(&ddb_entry->relogin_retry_count, 0);
58176 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
58177 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
58178 list_add_tail(&ddb_entry->list, &ha->ddb_list);
58179 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
58180 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
58181 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
58182 atomic_set(&ddb_entry->port_down_timer,
58183 ha->port_down_retry_count);
58184 - atomic_set(&ddb_entry->relogin_retry_count, 0);
58185 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
58186 atomic_set(&ddb_entry->relogin_timer, 0);
58187 clear_bit(DF_RELOGIN, &ddb_entry->flags);
58188 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
58189 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
58190 index 83c8b5e..a82b348 100644
58191 --- a/drivers/scsi/qla4xxx/ql4_os.c
58192 +++ b/drivers/scsi/qla4xxx/ql4_os.c
58193 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
58194 ddb_entry->fw_ddb_device_state ==
58195 DDB_DS_SESSION_FAILED) {
58196 /* Reset retry relogin timer */
58197 - atomic_inc(&ddb_entry->relogin_retry_count);
58198 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
58199 DEBUG2(printk("scsi%ld: index[%d] relogin"
58200 " timed out-retrying"
58201 " relogin (%d)\n",
58202 ha->host_no,
58203 ddb_entry->fw_ddb_index,
58204 - atomic_read(&ddb_entry->
58205 + atomic_read_unchecked(&ddb_entry->
58206 relogin_retry_count))
58207 );
58208 start_dpc++;
58209 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
58210 index dd098ca..686ce01 100644
58211 --- a/drivers/scsi/scsi.c
58212 +++ b/drivers/scsi/scsi.c
58213 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
58214 unsigned long timeout;
58215 int rtn = 0;
58216
58217 - atomic_inc(&cmd->device->iorequest_cnt);
58218 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
58219
58220 /* check if the device is still usable */
58221 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
58222 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
58223 index bc3e363..e1a8e50 100644
58224 --- a/drivers/scsi/scsi_debug.c
58225 +++ b/drivers/scsi/scsi_debug.c
58226 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
58227 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
58228 unsigned char *cmd = (unsigned char *)scp->cmnd;
58229
58230 + pax_track_stack();
58231 +
58232 if ((errsts = check_readiness(scp, 1, devip)))
58233 return errsts;
58234 memset(arr, 0, sizeof(arr));
58235 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
58236 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
58237 unsigned char *cmd = (unsigned char *)scp->cmnd;
58238
58239 + pax_track_stack();
58240 +
58241 if ((errsts = check_readiness(scp, 1, devip)))
58242 return errsts;
58243 memset(arr, 0, sizeof(arr));
58244 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
58245 index 8df12522..c4c1472 100644
58246 --- a/drivers/scsi/scsi_lib.c
58247 +++ b/drivers/scsi/scsi_lib.c
58248 @@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
58249 shost = sdev->host;
58250 scsi_init_cmd_errh(cmd);
58251 cmd->result = DID_NO_CONNECT << 16;
58252 - atomic_inc(&cmd->device->iorequest_cnt);
58253 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
58254
58255 /*
58256 * SCSI request completion path will do scsi_device_unbusy(),
58257 @@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
58258 */
58259 cmd->serial_number = 0;
58260
58261 - atomic_inc(&cmd->device->iodone_cnt);
58262 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
58263 if (cmd->result)
58264 - atomic_inc(&cmd->device->ioerr_cnt);
58265 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
58266
58267 disposition = scsi_decide_disposition(cmd);
58268 if (disposition != SUCCESS &&
58269 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
58270 index 91a93e0..eae0fe3 100644
58271 --- a/drivers/scsi/scsi_sysfs.c
58272 +++ b/drivers/scsi/scsi_sysfs.c
58273 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
58274 char *buf) \
58275 { \
58276 struct scsi_device *sdev = to_scsi_device(dev); \
58277 - unsigned long long count = atomic_read(&sdev->field); \
58278 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
58279 return snprintf(buf, 20, "0x%llx\n", count); \
58280 } \
58281 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
58282 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
58283 index 1030327..f91fd30 100644
58284 --- a/drivers/scsi/scsi_tgt_lib.c
58285 +++ b/drivers/scsi/scsi_tgt_lib.c
58286 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
58287 int err;
58288
58289 dprintk("%lx %u\n", uaddr, len);
58290 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
58291 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
58292 if (err) {
58293 /*
58294 * TODO: need to fixup sg_tablesize, max_segment_size,
58295 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
58296 index db02e31..1b42ea9 100644
58297 --- a/drivers/scsi/scsi_transport_fc.c
58298 +++ b/drivers/scsi/scsi_transport_fc.c
58299 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
58300 * Netlink Infrastructure
58301 */
58302
58303 -static atomic_t fc_event_seq;
58304 +static atomic_unchecked_t fc_event_seq;
58305
58306 /**
58307 * fc_get_event_number - Obtain the next sequential FC event number
58308 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
58309 u32
58310 fc_get_event_number(void)
58311 {
58312 - return atomic_add_return(1, &fc_event_seq);
58313 + return atomic_add_return_unchecked(1, &fc_event_seq);
58314 }
58315 EXPORT_SYMBOL(fc_get_event_number);
58316
58317 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
58318 {
58319 int error;
58320
58321 - atomic_set(&fc_event_seq, 0);
58322 + atomic_set_unchecked(&fc_event_seq, 0);
58323
58324 error = transport_class_register(&fc_host_class);
58325 if (error)
58326 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
58327 index de2f8c4..63c5278 100644
58328 --- a/drivers/scsi/scsi_transport_iscsi.c
58329 +++ b/drivers/scsi/scsi_transport_iscsi.c
58330 @@ -81,7 +81,7 @@ struct iscsi_internal {
58331 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
58332 };
58333
58334 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
58335 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
58336 static struct workqueue_struct *iscsi_eh_timer_workq;
58337
58338 /*
58339 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
58340 int err;
58341
58342 ihost = shost->shost_data;
58343 - session->sid = atomic_add_return(1, &iscsi_session_nr);
58344 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
58345
58346 if (id == ISCSI_MAX_TARGET) {
58347 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
58348 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
58349 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
58350 ISCSI_TRANSPORT_VERSION);
58351
58352 - atomic_set(&iscsi_session_nr, 0);
58353 + atomic_set_unchecked(&iscsi_session_nr, 0);
58354
58355 err = class_register(&iscsi_transport_class);
58356 if (err)
58357 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
58358 index 21a045e..ec89e03 100644
58359 --- a/drivers/scsi/scsi_transport_srp.c
58360 +++ b/drivers/scsi/scsi_transport_srp.c
58361 @@ -33,7 +33,7 @@
58362 #include "scsi_transport_srp_internal.h"
58363
58364 struct srp_host_attrs {
58365 - atomic_t next_port_id;
58366 + atomic_unchecked_t next_port_id;
58367 };
58368 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
58369
58370 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
58371 struct Scsi_Host *shost = dev_to_shost(dev);
58372 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
58373
58374 - atomic_set(&srp_host->next_port_id, 0);
58375 + atomic_set_unchecked(&srp_host->next_port_id, 0);
58376 return 0;
58377 }
58378
58379 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
58380 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
58381 rport->roles = ids->roles;
58382
58383 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
58384 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
58385 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
58386
58387 transport_setup_device(&rport->dev);
58388 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
58389 index 040f751..98a5ed2 100644
58390 --- a/drivers/scsi/sg.c
58391 +++ b/drivers/scsi/sg.c
58392 @@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
58393 sdp->disk->disk_name,
58394 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
58395 NULL,
58396 - (char *)arg);
58397 + (char __user *)arg);
58398 case BLKTRACESTART:
58399 return blk_trace_startstop(sdp->device->request_queue, 1);
58400 case BLKTRACESTOP:
58401 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
58402 const struct file_operations * fops;
58403 };
58404
58405 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
58406 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
58407 {"allow_dio", &adio_fops},
58408 {"debug", &debug_fops},
58409 {"def_reserved_size", &dressz_fops},
58410 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
58411 {
58412 int k, mask;
58413 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
58414 - struct sg_proc_leaf * leaf;
58415 + const struct sg_proc_leaf * leaf;
58416
58417 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
58418 if (!sg_proc_sgp)
58419 diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
58420 index c19ca5e..3eb5959 100644
58421 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
58422 +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
58423 @@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
58424 int do_iounmap = 0;
58425 int do_disable_device = 1;
58426
58427 + pax_track_stack();
58428 +
58429 memset(&sym_dev, 0, sizeof(sym_dev));
58430 memset(&nvram, 0, sizeof(nvram));
58431 sym_dev.pdev = pdev;
58432 diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
58433 new file mode 100644
58434 index 0000000..eabb432
58435 --- /dev/null
58436 +++ b/drivers/scsi/vmw_pvscsi.c
58437 @@ -0,0 +1,1401 @@
58438 +/*
58439 + * Linux driver for VMware's para-virtualized SCSI HBA.
58440 + *
58441 + * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
58442 + *
58443 + * This program is free software; you can redistribute it and/or modify it
58444 + * under the terms of the GNU General Public License as published by the
58445 + * Free Software Foundation; version 2 of the License and no later version.
58446 + *
58447 + * This program is distributed in the hope that it will be useful, but
58448 + * WITHOUT ANY WARRANTY; without even the implied warranty of
58449 + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
58450 + * NON INFRINGEMENT. See the GNU General Public License for more
58451 + * details.
58452 + *
58453 + * You should have received a copy of the GNU General Public License
58454 + * along with this program; if not, write to the Free Software
58455 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
58456 + *
58457 + * Maintained by: Alok N Kataria <akataria@vmware.com>
58458 + *
58459 + */
58460 +
58461 +#include <linux/kernel.h>
58462 +#include <linux/module.h>
58463 +#include <linux/moduleparam.h>
58464 +#include <linux/types.h>
58465 +#include <linux/interrupt.h>
58466 +#include <linux/workqueue.h>
58467 +#include <linux/pci.h>
58468 +
58469 +#include <scsi/scsi.h>
58470 +#include <scsi/scsi_host.h>
58471 +#include <scsi/scsi_cmnd.h>
58472 +#include <scsi/scsi_device.h>
58473 +
58474 +#include "vmw_pvscsi.h"
58475 +
58476 +#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"
58477 +
58478 +MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
58479 +MODULE_AUTHOR("VMware, Inc.");
58480 +MODULE_LICENSE("GPL");
58481 +MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);
58482 +
58483 +#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8
58484 +#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1
58485 +#define PVSCSI_DEFAULT_QUEUE_DEPTH 64
58486 +#define SGL_SIZE PAGE_SIZE
58487 +
58488 +#define pvscsi_dev(adapter) (&(adapter->dev->dev))
58489 +
58490 +struct pvscsi_sg_list {
58491 + struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
58492 +};
58493 +
58494 +struct pvscsi_ctx {
58495 + /*
58496 + * The index of the context in cmd_map serves as the context ID for a
58497 + * 1-to-1 mapping completions back to requests.
58498 + */
58499 + struct scsi_cmnd *cmd;
58500 + struct pvscsi_sg_list *sgl;
58501 + struct list_head list;
58502 + dma_addr_t dataPA;
58503 + dma_addr_t sensePA;
58504 + dma_addr_t sglPA;
58505 +};
58506 +
58507 +struct pvscsi_adapter {
58508 + char *mmioBase;
58509 + unsigned int irq;
58510 + u8 rev;
58511 + bool use_msi;
58512 + bool use_msix;
58513 + bool use_msg;
58514 +
58515 + spinlock_t hw_lock;
58516 +
58517 + struct workqueue_struct *workqueue;
58518 + struct work_struct work;
58519 +
58520 + struct PVSCSIRingReqDesc *req_ring;
58521 + unsigned req_pages;
58522 + unsigned req_depth;
58523 + dma_addr_t reqRingPA;
58524 +
58525 + struct PVSCSIRingCmpDesc *cmp_ring;
58526 + unsigned cmp_pages;
58527 + dma_addr_t cmpRingPA;
58528 +
58529 + struct PVSCSIRingMsgDesc *msg_ring;
58530 + unsigned msg_pages;
58531 + dma_addr_t msgRingPA;
58532 +
58533 + struct PVSCSIRingsState *rings_state;
58534 + dma_addr_t ringStatePA;
58535 +
58536 + struct pci_dev *dev;
58537 + struct Scsi_Host *host;
58538 +
58539 + struct list_head cmd_pool;
58540 + struct pvscsi_ctx *cmd_map;
58541 +};
58542 +
58543 +
58544 +/* Command line parameters */
58545 +static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
58546 +static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
58547 +static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH;
58548 +static bool pvscsi_disable_msi;
58549 +static bool pvscsi_disable_msix;
58550 +static bool pvscsi_use_msg = true;
58551 +
58552 +#define PVSCSI_RW (S_IRUSR | S_IWUSR)
58553 +
58554 +module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
58555 +MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
58556 + __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");
58557 +
58558 +module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
58559 +MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
58560 + __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");
58561 +
58562 +module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
58563 +MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
58564 + __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")");
58565 +
58566 +module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
58567 +MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
58568 +
58569 +module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
58570 +MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
58571 +
58572 +module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
58573 +MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
58574 +
58575 +static const struct pci_device_id pvscsi_pci_tbl[] = {
58576 + { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
58577 + { 0 }
58578 +};
58579 +
58580 +MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);
58581 +
58582 +static struct pvscsi_ctx *
58583 +pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
58584 +{
58585 + struct pvscsi_ctx *ctx, *end;
58586 +
58587 + end = &adapter->cmd_map[adapter->req_depth];
58588 + for (ctx = adapter->cmd_map; ctx < end; ctx++)
58589 + if (ctx->cmd == cmd)
58590 + return ctx;
58591 +
58592 + return NULL;
58593 +}
58594 +
58595 +static struct pvscsi_ctx *
58596 +pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
58597 +{
58598 + struct pvscsi_ctx *ctx;
58599 +
58600 + if (list_empty(&adapter->cmd_pool))
58601 + return NULL;
58602 +
58603 + ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
58604 + ctx->cmd = cmd;
58605 + list_del(&ctx->list);
58606 +
58607 + return ctx;
58608 +}
58609 +
58610 +static void pvscsi_release_context(struct pvscsi_adapter *adapter,
58611 + struct pvscsi_ctx *ctx)
58612 +{
58613 + ctx->cmd = NULL;
58614 + list_add(&ctx->list, &adapter->cmd_pool);
58615 +}
58616 +
58617 +/*
58618 + * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
58619 + * non-zero integer. ctx always points to an entry in cmd_map array, hence
58620 + * the return value is always >=1.
58621 + */
58622 +static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
58623 + const struct pvscsi_ctx *ctx)
58624 +{
58625 + return ctx - adapter->cmd_map + 1;
58626 +}
58627 +
58628 +static struct pvscsi_ctx *
58629 +pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
58630 +{
58631 + return &adapter->cmd_map[context - 1];
58632 +}
58633 +
58634 +static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
58635 + u32 offset, u32 val)
58636 +{
58637 + writel(val, adapter->mmioBase + offset);
58638 +}
58639 +
58640 +static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
58641 +{
58642 + return readl(adapter->mmioBase + offset);
58643 +}
58644 +
58645 +static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
58646 +{
58647 + return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
58648 +}
58649 +
58650 +static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
58651 + u32 val)
58652 +{
58653 + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
58654 +}
58655 +
58656 +static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
58657 +{
58658 + u32 intr_bits;
58659 +
58660 + intr_bits = PVSCSI_INTR_CMPL_MASK;
58661 + if (adapter->use_msg)
58662 + intr_bits |= PVSCSI_INTR_MSG_MASK;
58663 +
58664 + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
58665 +}
58666 +
58667 +static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
58668 +{
58669 + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
58670 +}
58671 +
58672 +static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
58673 + u32 cmd, const void *desc, size_t len)
58674 +{
58675 + const u32 *ptr = desc;
58676 + size_t i;
58677 +
58678 + len /= sizeof(*ptr);
58679 + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
58680 + for (i = 0; i < len; i++)
58681 + pvscsi_reg_write(adapter,
58682 + PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
58683 +}
58684 +
58685 +static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
58686 + const struct pvscsi_ctx *ctx)
58687 +{
58688 + struct PVSCSICmdDescAbortCmd cmd = { 0 };
58689 +
58690 + cmd.target = ctx->cmd->device->id;
58691 + cmd.context = pvscsi_map_context(adapter, ctx);
58692 +
58693 + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
58694 +}
58695 +
58696 +static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
58697 +{
58698 + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
58699 +}
58700 +
58701 +static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
58702 +{
58703 + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
58704 +}
58705 +
58706 +static int scsi_is_rw(unsigned char op)
58707 +{
58708 + return op == READ_6 || op == WRITE_6 ||
58709 + op == READ_10 || op == WRITE_10 ||
58710 + op == READ_12 || op == WRITE_12 ||
58711 + op == READ_16 || op == WRITE_16;
58712 +}
58713 +
58714 +static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
58715 + unsigned char op)
58716 +{
58717 + if (scsi_is_rw(op))
58718 + pvscsi_kick_rw_io(adapter);
58719 + else
58720 + pvscsi_process_request_ring(adapter);
58721 +}
58722 +
58723 +static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
58724 +{
58725 + dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);
58726 +
58727 + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
58728 +}
58729 +
58730 +static void ll_bus_reset(const struct pvscsi_adapter *adapter)
58731 +{
58732 + dev_dbg(pvscsi_dev(adapter), "Reseting bus on %p\n", adapter);
58733 +
58734 + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
58735 +}
58736 +
58737 +static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
58738 +{
58739 + struct PVSCSICmdDescResetDevice cmd = { 0 };
58740 +
58741 + dev_dbg(pvscsi_dev(adapter), "Reseting device: target=%u\n", target);
58742 +
58743 + cmd.target = target;
58744 +
58745 + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
58746 + &cmd, sizeof(cmd));
58747 +}
58748 +
58749 +static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
58750 + struct scatterlist *sg, unsigned count)
58751 +{
58752 + unsigned i;
58753 + struct PVSCSISGElement *sge;
58754 +
58755 + BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
58756 +
58757 + sge = &ctx->sgl->sge[0];
58758 + for (i = 0; i < count; i++, sg++) {
58759 + sge[i].addr = sg_dma_address(sg);
58760 + sge[i].length = sg_dma_len(sg);
58761 + sge[i].flags = 0;
58762 + }
58763 +}
58764 +
58765 +/*
58766 + * Map all data buffers for a command into PCI space and
58767 + * setup the scatter/gather list if needed.
58768 + */
58769 +static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
58770 + struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
58771 + struct PVSCSIRingReqDesc *e)
58772 +{
58773 + unsigned count;
58774 + unsigned bufflen = scsi_bufflen(cmd);
58775 + struct scatterlist *sg;
58776 +
58777 + e->dataLen = bufflen;
58778 + e->dataAddr = 0;
58779 + if (bufflen == 0)
58780 + return;
58781 +
58782 + sg = scsi_sglist(cmd);
58783 + count = scsi_sg_count(cmd);
58784 + if (count != 0) {
58785 + int segs = scsi_dma_map(cmd);
58786 + if (segs > 1) {
58787 + pvscsi_create_sg(ctx, sg, segs);
58788 +
58789 + e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
58790 + ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
58791 + SGL_SIZE, PCI_DMA_TODEVICE);
58792 + e->dataAddr = ctx->sglPA;
58793 + } else
58794 + e->dataAddr = sg_dma_address(sg);
58795 + } else {
58796 + /*
58797 + * In case there is no S/G list, scsi_sglist points
58798 + * directly to the buffer.
58799 + */
58800 + ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
58801 + cmd->sc_data_direction);
58802 + e->dataAddr = ctx->dataPA;
58803 + }
58804 +}
58805 +
58806 +static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
58807 + struct pvscsi_ctx *ctx)
58808 +{
58809 + struct scsi_cmnd *cmd;
58810 + unsigned bufflen;
58811 +
58812 + cmd = ctx->cmd;
58813 + bufflen = scsi_bufflen(cmd);
58814 +
58815 + if (bufflen != 0) {
58816 + unsigned count = scsi_sg_count(cmd);
58817 +
58818 + if (count != 0) {
58819 + scsi_dma_unmap(cmd);
58820 + if (ctx->sglPA) {
58821 + pci_unmap_single(adapter->dev, ctx->sglPA,
58822 + SGL_SIZE, PCI_DMA_TODEVICE);
58823 + ctx->sglPA = 0;
58824 + }
58825 + } else
58826 + pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
58827 + cmd->sc_data_direction);
58828 + }
58829 + if (cmd->sense_buffer)
58830 + pci_unmap_single(adapter->dev, ctx->sensePA,
58831 + SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
58832 +}
58833 +
58834 +static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
58835 +{
58836 + adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
58837 + &adapter->ringStatePA);
58838 + if (!adapter->rings_state)
58839 + return -ENOMEM;
58840 +
58841 + adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
58842 + pvscsi_ring_pages);
58843 + adapter->req_depth = adapter->req_pages
58844 + * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
58845 + adapter->req_ring = pci_alloc_consistent(adapter->dev,
58846 + adapter->req_pages * PAGE_SIZE,
58847 + &adapter->reqRingPA);
58848 + if (!adapter->req_ring)
58849 + return -ENOMEM;
58850 +
58851 + adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
58852 + pvscsi_ring_pages);
58853 + adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
58854 + adapter->cmp_pages * PAGE_SIZE,
58855 + &adapter->cmpRingPA);
58856 + if (!adapter->cmp_ring)
58857 + return -ENOMEM;
58858 +
58859 + BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
58860 + BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
58861 + BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));
58862 +
58863 + if (!adapter->use_msg)
58864 + return 0;
58865 +
58866 + adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
58867 + pvscsi_msg_ring_pages);
58868 + adapter->msg_ring = pci_alloc_consistent(adapter->dev,
58869 + adapter->msg_pages * PAGE_SIZE,
58870 + &adapter->msgRingPA);
58871 + if (!adapter->msg_ring)
58872 + return -ENOMEM;
58873 + BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
58874 +
58875 + return 0;
58876 +}
58877 +
58878 +static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
58879 +{
58880 + struct PVSCSICmdDescSetupRings cmd = { 0 };
58881 + dma_addr_t base;
58882 + unsigned i;
58883 +
58884 + cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
58885 + cmd.reqRingNumPages = adapter->req_pages;
58886 + cmd.cmpRingNumPages = adapter->cmp_pages;
58887 +
58888 + base = adapter->reqRingPA;
58889 + for (i = 0; i < adapter->req_pages; i++) {
58890 + cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
58891 + base += PAGE_SIZE;
58892 + }
58893 +
58894 + base = adapter->cmpRingPA;
58895 + for (i = 0; i < adapter->cmp_pages; i++) {
58896 + cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
58897 + base += PAGE_SIZE;
58898 + }
58899 +
58900 + memset(adapter->rings_state, 0, PAGE_SIZE);
58901 + memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
58902 + memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);
58903 +
58904 + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
58905 + &cmd, sizeof(cmd));
58906 +
58907 + if (adapter->use_msg) {
58908 + struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };
58909 +
58910 + cmd_msg.numPages = adapter->msg_pages;
58911 +
58912 + base = adapter->msgRingPA;
58913 + for (i = 0; i < adapter->msg_pages; i++) {
58914 + cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
58915 + base += PAGE_SIZE;
58916 + }
58917 + memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);
58918 +
58919 + pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
58920 + &cmd_msg, sizeof(cmd_msg));
58921 + }
58922 +}
58923 +
58924 +/*
58925 + * Pull a completion descriptor off and pass the completion back
58926 + * to the SCSI mid layer.
58927 + */
58928 +static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
58929 + const struct PVSCSIRingCmpDesc *e)
58930 +{
58931 + struct pvscsi_ctx *ctx;
58932 + struct scsi_cmnd *cmd;
58933 + u32 btstat = e->hostStatus;
58934 + u32 sdstat = e->scsiStatus;
58935 +
58936 + ctx = pvscsi_get_context(adapter, e->context);
58937 + cmd = ctx->cmd;
58938 + pvscsi_unmap_buffers(adapter, ctx);
58939 + pvscsi_release_context(adapter, ctx);
58940 + cmd->result = 0;
58941 +
58942 + if (sdstat != SAM_STAT_GOOD &&
58943 + (btstat == BTSTAT_SUCCESS ||
58944 + btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
58945 + btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
58946 + cmd->result = (DID_OK << 16) | sdstat;
58947 + if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
58948 + cmd->result |= (DRIVER_SENSE << 24);
58949 + } else
58950 + switch (btstat) {
58951 + case BTSTAT_SUCCESS:
58952 + case BTSTAT_LINKED_COMMAND_COMPLETED:
58953 + case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
58954 + /* If everything went fine, let's move on.. */
58955 + cmd->result = (DID_OK << 16);
58956 + break;
58957 +
58958 + case BTSTAT_DATARUN:
58959 + case BTSTAT_DATA_UNDERRUN:
58960 + /* Report residual data in underruns */
58961 + scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
58962 + cmd->result = (DID_ERROR << 16);
58963 + break;
58964 +
58965 + case BTSTAT_SELTIMEO:
58966 + /* Our emulation returns this for non-connected devs */
58967 + cmd->result = (DID_BAD_TARGET << 16);
58968 + break;
58969 +
58970 + case BTSTAT_LUNMISMATCH:
58971 + case BTSTAT_TAGREJECT:
58972 + case BTSTAT_BADMSG:
58973 + cmd->result = (DRIVER_INVALID << 24);
58974 + /* fall through */
58975 +
58976 + case BTSTAT_HAHARDWARE:
58977 + case BTSTAT_INVPHASE:
58978 + case BTSTAT_HATIMEOUT:
58979 + case BTSTAT_NORESPONSE:
58980 + case BTSTAT_DISCONNECT:
58981 + case BTSTAT_HASOFTWARE:
58982 + case BTSTAT_BUSFREE:
58983 + case BTSTAT_SENSFAILED:
58984 + cmd->result |= (DID_ERROR << 16);
58985 + break;
58986 +
58987 + case BTSTAT_SENTRST:
58988 + case BTSTAT_RECVRST:
58989 + case BTSTAT_BUSRESET:
58990 + cmd->result = (DID_RESET << 16);
58991 + break;
58992 +
58993 + case BTSTAT_ABORTQUEUE:
58994 + cmd->result = (DID_ABORT << 16);
58995 + break;
58996 +
58997 + case BTSTAT_SCSIPARITY:
58998 + cmd->result = (DID_PARITY << 16);
58999 + break;
59000 +
59001 + default:
59002 + cmd->result = (DID_ERROR << 16);
59003 + scmd_printk(KERN_DEBUG, cmd,
59004 + "Unknown completion status: 0x%x\n",
59005 + btstat);
59006 + }
59007 +
59008 + dev_dbg(&cmd->device->sdev_gendev,
59009 + "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
59010 + cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);
59011 +
59012 + cmd->scsi_done(cmd);
59013 +}
59014 +
59015 +/*
59016 + * barrier usage : Since the PVSCSI device is emulated, there could be cases
59017 + * where we may want to serialize some accesses between the driver and the
59018 + * emulation layer. We use compiler barriers instead of the more expensive
59019 + * memory barriers because PVSCSI is only supported on X86 which has strong
59020 + * memory access ordering.
59021 + */
59022 +static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
59023 +{
59024 + struct PVSCSIRingsState *s = adapter->rings_state;
59025 + struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
59026 + u32 cmp_entries = s->cmpNumEntriesLog2;
59027 +
59028 + while (s->cmpConsIdx != s->cmpProdIdx) {
59029 + struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
59030 + MASK(cmp_entries));
59031 + /*
59032 + * This barrier() ensures that *e is not dereferenced while
59033 + * the device emulation still writes data into the slot.
59034 + * Since the device emulation advances s->cmpProdIdx only after
59035 + * updating the slot we want to check it first.
59036 + */
59037 + barrier();
59038 + pvscsi_complete_request(adapter, e);
59039 + /*
59040 + * This barrier() ensures that compiler doesn't reorder write
59041 + * to s->cmpConsIdx before the read of (*e) inside
59042 + * pvscsi_complete_request. Otherwise, device emulation may
59043 + * overwrite *e before we had a chance to read it.
59044 + */
59045 + barrier();
59046 + s->cmpConsIdx++;
59047 + }
59048 +}
59049 +
59050 +/*
59051 + * Translate a Linux SCSI request into a request ring entry.
59052 + */
59053 +static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
59054 + struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
59055 +{
59056 + struct PVSCSIRingsState *s;
59057 + struct PVSCSIRingReqDesc *e;
59058 + struct scsi_device *sdev;
59059 + u32 req_entries;
59060 +
59061 + s = adapter->rings_state;
59062 + sdev = cmd->device;
59063 + req_entries = s->reqNumEntriesLog2;
59064 +
59065 + /*
59066 + * If this condition holds, we might have room on the request ring, but
59067 + * we might not have room on the completion ring for the response.
59068 + * However, we have already ruled out this possibility - we would not
59069 + * have successfully allocated a context if it were true, since we only
59070 + * have one context per request entry. Check for it anyway, since it
59071 + * would be a serious bug.
59072 + */
59073 + if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
59074 + scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
59075 + "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
59076 + s->reqProdIdx, s->cmpConsIdx);
59077 + return -1;
59078 + }
59079 +
59080 + e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));
59081 +
59082 + e->bus = sdev->channel;
59083 + e->target = sdev->id;
59084 + memset(e->lun, 0, sizeof(e->lun));
59085 + e->lun[1] = sdev->lun;
59086 +
59087 + if (cmd->sense_buffer) {
59088 + ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
59089 + SCSI_SENSE_BUFFERSIZE,
59090 + PCI_DMA_FROMDEVICE);
59091 + e->senseAddr = ctx->sensePA;
59092 + e->senseLen = SCSI_SENSE_BUFFERSIZE;
59093 + } else {
59094 + e->senseLen = 0;
59095 + e->senseAddr = 0;
59096 + }
59097 + e->cdbLen = cmd->cmd_len;
59098 + e->vcpuHint = smp_processor_id();
59099 + memcpy(e->cdb, cmd->cmnd, e->cdbLen);
59100 +
59101 + e->tag = SIMPLE_QUEUE_TAG;
59102 + if (sdev->tagged_supported &&
59103 + (cmd->tag == HEAD_OF_QUEUE_TAG ||
59104 + cmd->tag == ORDERED_QUEUE_TAG))
59105 + e->tag = cmd->tag;
59106 +
59107 + if (cmd->sc_data_direction == DMA_FROM_DEVICE)
59108 + e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
59109 + else if (cmd->sc_data_direction == DMA_TO_DEVICE)
59110 + e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
59111 + else if (cmd->sc_data_direction == DMA_NONE)
59112 + e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
59113 + else
59114 + e->flags = 0;
59115 +
59116 + pvscsi_map_buffers(adapter, ctx, cmd, e);
59117 +
59118 + e->context = pvscsi_map_context(adapter, ctx);
59119 +
59120 + barrier();
59121 +
59122 + s->reqProdIdx++;
59123 +
59124 + return 0;
59125 +}
59126 +
59127 +static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
59128 +{
59129 + struct Scsi_Host *host = cmd->device->host;
59130 + struct pvscsi_adapter *adapter = shost_priv(host);
59131 + struct pvscsi_ctx *ctx;
59132 + unsigned long flags;
59133 +
59134 + spin_lock_irqsave(&adapter->hw_lock, flags);
59135 +
59136 + ctx = pvscsi_acquire_context(adapter, cmd);
59137 + if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
59138 + if (ctx)
59139 + pvscsi_release_context(adapter, ctx);
59140 + spin_unlock_irqrestore(&adapter->hw_lock, flags);
59141 + return SCSI_MLQUEUE_HOST_BUSY;
59142 + }
59143 +
59144 + cmd->scsi_done = done;
59145 +
59146 + dev_dbg(&cmd->device->sdev_gendev,
59147 + "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
59148 +
59149 + spin_unlock_irqrestore(&adapter->hw_lock, flags);
59150 +
59151 + pvscsi_kick_io(adapter, cmd->cmnd[0]);
59152 +
59153 + return 0;
59154 +}
59155 +
59156 +static int pvscsi_abort(struct scsi_cmnd *cmd)
59157 +{
59158 + struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
59159 + struct pvscsi_ctx *ctx;
59160 + unsigned long flags;
59161 +
59162 + scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
59163 + adapter->host->host_no, cmd);
59164 +
59165 + spin_lock_irqsave(&adapter->hw_lock, flags);
59166 +
59167 + /*
59168 + * Poll the completion ring first - we might be trying to abort
59169 + * a command that is waiting to be dispatched in the completion ring.
59170 + */
59171 + pvscsi_process_completion_ring(adapter);
59172 +
59173 + /*
59174 + * If there is no context for the command, it either already succeeded
59175 + * or else was never properly issued. Not our problem.
59176 + */
59177 + ctx = pvscsi_find_context(adapter, cmd);
59178 + if (!ctx) {
59179 + scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
59180 + goto out;
59181 + }
59182 +
59183 + pvscsi_abort_cmd(adapter, ctx);
59184 +
59185 + pvscsi_process_completion_ring(adapter);
59186 +
59187 +out:
59188 + spin_unlock_irqrestore(&adapter->hw_lock, flags);
59189 + return SUCCESS;
59190 +}
59191 +
59192 +/*
59193 + * Abort all outstanding requests. This is only safe to use if the completion
59194 + * ring will never be walked again or the device has been reset, because it
59195 + * destroys the 1-1 mapping between context field passed to emulation and our
59196 + * request structure.
59197 + */
59198 +static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
59199 +{
59200 + unsigned i;
59201 +
59202 + for (i = 0; i < adapter->req_depth; i++) {
59203 + struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
59204 + struct scsi_cmnd *cmd = ctx->cmd;
59205 + if (cmd) {
59206 + scmd_printk(KERN_ERR, cmd,
59207 + "Forced reset on cmd %p\n", cmd);
59208 + pvscsi_unmap_buffers(adapter, ctx);
59209 + pvscsi_release_context(adapter, ctx);
59210 + cmd->result = (DID_RESET << 16);
59211 + cmd->scsi_done(cmd);
59212 + }
59213 + }
59214 +}
59215 +
59216 +static int pvscsi_host_reset(struct scsi_cmnd *cmd)
59217 +{
59218 + struct Scsi_Host *host = cmd->device->host;
59219 + struct pvscsi_adapter *adapter = shost_priv(host);
59220 + unsigned long flags;
59221 + bool use_msg;
59222 +
59223 + scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");
59224 +
59225 + spin_lock_irqsave(&adapter->hw_lock, flags);
59226 +
59227 + use_msg = adapter->use_msg;
59228 +
59229 + if (use_msg) {
59230 + adapter->use_msg = 0;
59231 + spin_unlock_irqrestore(&adapter->hw_lock, flags);
59232 +
59233 + /*
59234 + * Now that we know that the ISR won't add more work on the
59235 + * workqueue we can safely flush any outstanding work.
59236 + */
59237 + flush_workqueue(adapter->workqueue);
59238 + spin_lock_irqsave(&adapter->hw_lock, flags);
59239 + }
59240 +
59241 + /*
59242 + * We're going to tear down the entire ring structure and set it back
59243 + * up, so stalling new requests until all completions are flushed and
59244 + * the rings are back in place.
59245 + */
59246 +
59247 + pvscsi_process_request_ring(adapter);
59248 +
59249 + ll_adapter_reset(adapter);
59250 +
59251 + /*
59252 + * Now process any completions. Note we do this AFTER adapter reset,
59253 + * which is strange, but stops races where completions get posted
59254 + * between processing the ring and issuing the reset. The backend will
59255 + * not touch the ring memory after reset, so the immediately pre-reset
59256 + * completion ring state is still valid.
59257 + */
59258 + pvscsi_process_completion_ring(adapter);
59259 +
59260 + pvscsi_reset_all(adapter);
59261 + adapter->use_msg = use_msg;
59262 + pvscsi_setup_all_rings(adapter);
59263 + pvscsi_unmask_intr(adapter);
59264 +
59265 + spin_unlock_irqrestore(&adapter->hw_lock, flags);
59266 +
59267 + return SUCCESS;
59268 +}
59269 +
59270 +static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
59271 +{
59272 + struct Scsi_Host *host = cmd->device->host;
59273 + struct pvscsi_adapter *adapter = shost_priv(host);
59274 + unsigned long flags;
59275 +
59276 + scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");
59277 +
59278 + /*
59279 + * We don't want to queue new requests for this bus after
59280 + * flushing all pending requests to emulation, since new
59281 + * requests could then sneak in during this bus reset phase,
59282 + * so take the lock now.
59283 + */
59284 + spin_lock_irqsave(&adapter->hw_lock, flags);
59285 +
59286 + pvscsi_process_request_ring(adapter);
59287 + ll_bus_reset(adapter);
59288 + pvscsi_process_completion_ring(adapter);
59289 +
59290 + spin_unlock_irqrestore(&adapter->hw_lock, flags);
59291 +
59292 + return SUCCESS;
59293 +}
59294 +
59295 +static int pvscsi_device_reset(struct scsi_cmnd *cmd)
59296 +{
59297 + struct Scsi_Host *host = cmd->device->host;
59298 + struct pvscsi_adapter *adapter = shost_priv(host);
59299 + unsigned long flags;
59300 +
59301 + scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
59302 + host->host_no, cmd->device->id);
59303 +
59304 + /*
59305 + * We don't want to queue new requests for this device after flushing
59306 + * all pending requests to emulation, since new requests could then
59307 + * sneak in during this device reset phase, so take the lock now.
59308 + */
59309 + spin_lock_irqsave(&adapter->hw_lock, flags);
59310 +
59311 + pvscsi_process_request_ring(adapter);
59312 + ll_device_reset(adapter, cmd->device->id);
59313 + pvscsi_process_completion_ring(adapter);
59314 +
59315 + spin_unlock_irqrestore(&adapter->hw_lock, flags);
59316 +
59317 + return SUCCESS;
59318 +}
59319 +
59320 +static struct scsi_host_template pvscsi_template;
59321 +
59322 +static const char *pvscsi_info(struct Scsi_Host *host)
59323 +{
59324 + struct pvscsi_adapter *adapter = shost_priv(host);
59325 + static char buf[256];
59326 +
59327 + sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
59328 + "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
59329 + adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
59330 + pvscsi_template.cmd_per_lun);
59331 +
59332 + return buf;
59333 +}
59334 +
59335 +static struct scsi_host_template pvscsi_template = {
59336 + .module = THIS_MODULE,
59337 + .name = "VMware PVSCSI Host Adapter",
59338 + .proc_name = "vmw_pvscsi",
59339 + .info = pvscsi_info,
59340 + .queuecommand = pvscsi_queue,
59341 + .this_id = -1,
59342 + .sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
59343 + .dma_boundary = UINT_MAX,
59344 + .max_sectors = 0xffff,
59345 + .use_clustering = ENABLE_CLUSTERING,
59346 + .eh_abort_handler = pvscsi_abort,
59347 + .eh_device_reset_handler = pvscsi_device_reset,
59348 + .eh_bus_reset_handler = pvscsi_bus_reset,
59349 + .eh_host_reset_handler = pvscsi_host_reset,
59350 +};
59351 +
59352 +static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
59353 + const struct PVSCSIRingMsgDesc *e)
59354 +{
59355 + struct PVSCSIRingsState *s = adapter->rings_state;
59356 + struct Scsi_Host *host = adapter->host;
59357 + struct scsi_device *sdev;
59358 +
59359 + printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n",
59360 + e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);
59361 +
59362 + BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);
59363 +
59364 + if (e->type == PVSCSI_MSG_DEV_ADDED) {
59365 + struct PVSCSIMsgDescDevStatusChanged *desc;
59366 + desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
59367 +
59368 + printk(KERN_INFO
59369 + "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
59370 + desc->bus, desc->target, desc->lun[1]);
59371 +
59372 + if (!scsi_host_get(host))
59373 + return;
59374 +
59375 + sdev = scsi_device_lookup(host, desc->bus, desc->target,
59376 + desc->lun[1]);
59377 + if (sdev) {
59378 + printk(KERN_INFO "vmw_pvscsi: device already exists\n");
59379 + scsi_device_put(sdev);
59380 + } else
59381 + scsi_add_device(adapter->host, desc->bus,
59382 + desc->target, desc->lun[1]);
59383 +
59384 + scsi_host_put(host);
59385 + } else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
59386 + struct PVSCSIMsgDescDevStatusChanged *desc;
59387 + desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
59388 +
59389 + printk(KERN_INFO
59390 + "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
59391 + desc->bus, desc->target, desc->lun[1]);
59392 +
59393 + if (!scsi_host_get(host))
59394 + return;
59395 +
59396 + sdev = scsi_device_lookup(host, desc->bus, desc->target,
59397 + desc->lun[1]);
59398 + if (sdev) {
59399 + scsi_remove_device(sdev);
59400 + scsi_device_put(sdev);
59401 + } else
59402 + printk(KERN_INFO
59403 + "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
59404 + desc->bus, desc->target, desc->lun[1]);
59405 +
59406 + scsi_host_put(host);
59407 + }
59408 +}
59409 +
59410 +static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
59411 +{
59412 + struct PVSCSIRingsState *s = adapter->rings_state;
59413 +
59414 + return s->msgProdIdx != s->msgConsIdx;
59415 +}
59416 +
59417 +static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
59418 +{
59419 + struct PVSCSIRingsState *s = adapter->rings_state;
59420 + struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
59421 + u32 msg_entries = s->msgNumEntriesLog2;
59422 +
59423 + while (pvscsi_msg_pending(adapter)) {
59424 + struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
59425 + MASK(msg_entries));
59426 +
59427 + barrier();
59428 + pvscsi_process_msg(adapter, e);
59429 + barrier();
59430 + s->msgConsIdx++;
59431 + }
59432 +}
59433 +
59434 +static void pvscsi_msg_workqueue_handler(struct work_struct *data)
59435 +{
59436 + struct pvscsi_adapter *adapter;
59437 +
59438 + adapter = container_of(data, struct pvscsi_adapter, work);
59439 +
59440 + pvscsi_process_msg_ring(adapter);
59441 +}
59442 +
59443 +static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
59444 +{
59445 + char name[32];
59446 +
59447 + if (!pvscsi_use_msg)
59448 + return 0;
59449 +
59450 + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
59451 + PVSCSI_CMD_SETUP_MSG_RING);
59452 +
59453 + if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
59454 + return 0;
59455 +
59456 + snprintf(name, sizeof(name),
59457 + "vmw_pvscsi_wq_%u", adapter->host->host_no);
59458 +
59459 + adapter->workqueue = create_singlethread_workqueue(name);
59460 + if (!adapter->workqueue) {
59461 + printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
59462 + return 0;
59463 + }
59464 + INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
59465 +
59466 + return 1;
59467 +}
59468 +
59469 +static irqreturn_t pvscsi_isr(int irq, void *devp)
59470 +{
59471 + struct pvscsi_adapter *adapter = devp;
59472 + int handled;
59473 +
59474 + if (adapter->use_msi || adapter->use_msix)
59475 + handled = true;
59476 + else {
59477 + u32 val = pvscsi_read_intr_status(adapter);
59478 + handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
59479 + if (handled)
59480 + pvscsi_write_intr_status(devp, val);
59481 + }
59482 +
59483 + if (handled) {
59484 + unsigned long flags;
59485 +
59486 + spin_lock_irqsave(&adapter->hw_lock, flags);
59487 +
59488 + pvscsi_process_completion_ring(adapter);
59489 + if (adapter->use_msg && pvscsi_msg_pending(adapter))
59490 + queue_work(adapter->workqueue, &adapter->work);
59491 +
59492 + spin_unlock_irqrestore(&adapter->hw_lock, flags);
59493 + }
59494 +
59495 + return IRQ_RETVAL(handled);
59496 +}
59497 +
59498 +static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
59499 +{
59500 + struct pvscsi_ctx *ctx = adapter->cmd_map;
59501 + unsigned i;
59502 +
59503 + for (i = 0; i < adapter->req_depth; ++i, ++ctx)
59504 + kfree(ctx->sgl);
59505 +}
59506 +
59507 +static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int *irq)
59508 +{
59509 + struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
59510 + int ret;
59511 +
59512 + ret = pci_enable_msix(adapter->dev, &entry, 1);
59513 + if (ret)
59514 + return ret;
59515 +
59516 + *irq = entry.vector;
59517 +
59518 + return 0;
59519 +}
59520 +
59521 +static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
59522 +{
59523 + if (adapter->irq) {
59524 + free_irq(adapter->irq, adapter);
59525 + adapter->irq = 0;
59526 + }
59527 + if (adapter->use_msi) {
59528 + pci_disable_msi(adapter->dev);
59529 + adapter->use_msi = 0;
59530 + } else if (adapter->use_msix) {
59531 + pci_disable_msix(adapter->dev);
59532 + adapter->use_msix = 0;
59533 + }
59534 +}
59535 +
59536 +static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
59537 +{
59538 + pvscsi_shutdown_intr(adapter);
59539 +
59540 + if (adapter->workqueue)
59541 + destroy_workqueue(adapter->workqueue);
59542 +
59543 + if (adapter->mmioBase)
59544 + pci_iounmap(adapter->dev, adapter->mmioBase);
59545 +
59546 + pci_release_regions(adapter->dev);
59547 +
59548 + if (adapter->cmd_map) {
59549 + pvscsi_free_sgls(adapter);
59550 + kfree(adapter->cmd_map);
59551 + }
59552 +
59553 + if (adapter->rings_state)
59554 + pci_free_consistent(adapter->dev, PAGE_SIZE,
59555 + adapter->rings_state, adapter->ringStatePA);
59556 +
59557 + if (adapter->req_ring)
59558 + pci_free_consistent(adapter->dev,
59559 + adapter->req_pages * PAGE_SIZE,
59560 + adapter->req_ring, adapter->reqRingPA);
59561 +
59562 + if (adapter->cmp_ring)
59563 + pci_free_consistent(adapter->dev,
59564 + adapter->cmp_pages * PAGE_SIZE,
59565 + adapter->cmp_ring, adapter->cmpRingPA);
59566 +
59567 + if (adapter->msg_ring)
59568 + pci_free_consistent(adapter->dev,
59569 + adapter->msg_pages * PAGE_SIZE,
59570 + adapter->msg_ring, adapter->msgRingPA);
59571 +}
59572 +
59573 +/*
59574 + * Allocate scatter gather lists.
59575 + *
59576 + * These are statically allocated. Trying to be clever was not worth it.
59577 + *
59578 + * Dynamic allocation can fail, and we can't go deeep into the memory
59579 + * allocator, since we're a SCSI driver, and trying too hard to allocate
59580 + * memory might generate disk I/O. We also don't want to fail disk I/O
59581 + * in that case because we can't get an allocation - the I/O could be
59582 + * trying to swap out data to free memory. Since that is pathological,
59583 + * just use a statically allocated scatter list.
59584 + *
59585 + */
59586 +static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
59587 +{
59588 + struct pvscsi_ctx *ctx;
59589 + int i;
59590 +
59591 + ctx = adapter->cmd_map;
59592 + BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);
59593 +
59594 + for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
59595 + ctx->sgl = kmalloc(SGL_SIZE, GFP_KERNEL);
59596 + ctx->sglPA = 0;
59597 + BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
59598 + if (!ctx->sgl) {
59599 + for (; i >= 0; --i, --ctx) {
59600 + kfree(ctx->sgl);
59601 + ctx->sgl = NULL;
59602 + }
59603 + return -ENOMEM;
59604 + }
59605 + }
59606 +
59607 + return 0;
59608 +}
59609 +
59610 +static int __devinit pvscsi_probe(struct pci_dev *pdev,
59611 + const struct pci_device_id *id)
59612 +{
59613 + struct pvscsi_adapter *adapter;
59614 + struct Scsi_Host *host;
59615 + unsigned int i;
59616 + int error;
59617 +
59618 + error = -ENODEV;
59619 +
59620 + if (pci_enable_device(pdev))
59621 + return error;
59622 +
59623 + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
59624 + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
59625 + printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
59626 + } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
59627 + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
59628 + printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
59629 + } else {
59630 + printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
59631 + goto out_disable_device;
59632 + }
59633 +
59634 + pvscsi_template.can_queue =
59635 + min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
59636 + PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
59637 + pvscsi_template.cmd_per_lun =
59638 + min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
59639 + host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
59640 + if (!host) {
59641 + printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
59642 + goto out_disable_device;
59643 + }
59644 +
59645 + adapter = shost_priv(host);
59646 + memset(adapter, 0, sizeof(*adapter));
59647 + adapter->dev = pdev;
59648 + adapter->host = host;
59649 +
59650 + spin_lock_init(&adapter->hw_lock);
59651 +
59652 + host->max_channel = 0;
59653 + host->max_id = 16;
59654 + host->max_lun = 1;
59655 + host->max_cmd_len = 16;
59656 +
59657 + adapter->rev = pdev->revision;
59658 +
59659 + if (pci_request_regions(pdev, "vmw_pvscsi")) {
59660 + printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
59661 + goto out_free_host;
59662 + }
59663 +
59664 + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
59665 + if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
59666 + continue;
59667 +
59668 + if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
59669 + continue;
59670 +
59671 + break;
59672 + }
59673 +
59674 + if (i == DEVICE_COUNT_RESOURCE) {
59675 + printk(KERN_ERR
59676 + "vmw_pvscsi: adapter has no suitable MMIO region\n");
59677 + goto out_release_resources;
59678 + }
59679 +
59680 + adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
59681 +
59682 + if (!adapter->mmioBase) {
59683 + printk(KERN_ERR
59684 + "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
59685 + i, PVSCSI_MEM_SPACE_SIZE);
59686 + goto out_release_resources;
59687 + }
59688 +
59689 + pci_set_master(pdev);
59690 + pci_set_drvdata(pdev, host);
59691 +
59692 + ll_adapter_reset(adapter);
59693 +
59694 + adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
59695 +
59696 + error = pvscsi_allocate_rings(adapter);
59697 + if (error) {
59698 + printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
59699 + goto out_release_resources;
59700 + }
59701 +
59702 + /*
59703 + * From this point on we should reset the adapter if anything goes
59704 + * wrong.
59705 + */
59706 + pvscsi_setup_all_rings(adapter);
59707 +
59708 + adapter->cmd_map = kcalloc(adapter->req_depth,
59709 + sizeof(struct pvscsi_ctx), GFP_KERNEL);
59710 + if (!adapter->cmd_map) {
59711 + printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
59712 + error = -ENOMEM;
59713 + goto out_reset_adapter;
59714 + }
59715 +
59716 + INIT_LIST_HEAD(&adapter->cmd_pool);
59717 + for (i = 0; i < adapter->req_depth; i++) {
59718 + struct pvscsi_ctx *ctx = adapter->cmd_map + i;
59719 + list_add(&ctx->list, &adapter->cmd_pool);
59720 + }
59721 +
59722 + error = pvscsi_allocate_sg(adapter);
59723 + if (error) {
59724 + printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
59725 + goto out_reset_adapter;
59726 + }
59727 +
59728 + if (!pvscsi_disable_msix &&
59729 + pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
59730 + printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
59731 + adapter->use_msix = 1;
59732 + } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
59733 + printk(KERN_INFO "vmw_pvscsi: using MSI\n");
59734 + adapter->use_msi = 1;
59735 + adapter->irq = pdev->irq;
59736 + } else {
59737 + printk(KERN_INFO "vmw_pvscsi: using INTx\n");
59738 + adapter->irq = pdev->irq;
59739 + }
59740 +
59741 + error = request_irq(adapter->irq, pvscsi_isr, IRQF_SHARED,
59742 + "vmw_pvscsi", adapter);
59743 + if (error) {
59744 + printk(KERN_ERR
59745 + "vmw_pvscsi: unable to request IRQ: %d\n", error);
59746 + adapter->irq = 0;
59747 + goto out_reset_adapter;
59748 + }
59749 +
59750 + error = scsi_add_host(host, &pdev->dev);
59751 + if (error) {
59752 + printk(KERN_ERR
59753 + "vmw_pvscsi: scsi_add_host failed: %d\n", error);
59754 + goto out_reset_adapter;
59755 + }
59756 +
59757 + dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
59758 + adapter->rev, host->host_no);
59759 +
59760 + pvscsi_unmask_intr(adapter);
59761 +
59762 + scsi_scan_host(host);
59763 +
59764 + return 0;
59765 +
59766 +out_reset_adapter:
59767 + ll_adapter_reset(adapter);
59768 +out_release_resources:
59769 + pvscsi_release_resources(adapter);
59770 +out_free_host:
59771 + scsi_host_put(host);
59772 +out_disable_device:
59773 + pci_set_drvdata(pdev, NULL);
59774 + pci_disable_device(pdev);
59775 +
59776 + return error;
59777 +}
59778 +
59779 +static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
59780 +{
59781 + pvscsi_mask_intr(adapter);
59782 +
59783 + if (adapter->workqueue)
59784 + flush_workqueue(adapter->workqueue);
59785 +
59786 + pvscsi_shutdown_intr(adapter);
59787 +
59788 + pvscsi_process_request_ring(adapter);
59789 + pvscsi_process_completion_ring(adapter);
59790 + ll_adapter_reset(adapter);
59791 +}
59792 +
59793 +static void pvscsi_shutdown(struct pci_dev *dev)
59794 +{
59795 + struct Scsi_Host *host = pci_get_drvdata(dev);
59796 + struct pvscsi_adapter *adapter = shost_priv(host);
59797 +
59798 + __pvscsi_shutdown(adapter);
59799 +}
59800 +
59801 +static void pvscsi_remove(struct pci_dev *pdev)
59802 +{
59803 + struct Scsi_Host *host = pci_get_drvdata(pdev);
59804 + struct pvscsi_adapter *adapter = shost_priv(host);
59805 +
59806 + scsi_remove_host(host);
59807 +
59808 + __pvscsi_shutdown(adapter);
59809 + pvscsi_release_resources(adapter);
59810 +
59811 + scsi_host_put(host);
59812 +
59813 + pci_set_drvdata(pdev, NULL);
59814 + pci_disable_device(pdev);
59815 +}
59816 +
59817 +static struct pci_driver pvscsi_pci_driver = {
59818 + .name = "vmw_pvscsi",
59819 + .id_table = pvscsi_pci_tbl,
59820 + .probe = pvscsi_probe,
59821 + .remove = __devexit_p(pvscsi_remove),
59822 + .shutdown = pvscsi_shutdown,
59823 +};
59824 +
59825 +static int __init pvscsi_init(void)
59826 +{
59827 + pr_info("%s - version %s\n",
59828 + PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
59829 + return pci_register_driver(&pvscsi_pci_driver);
59830 +}
59831 +
59832 +static void __exit pvscsi_exit(void)
59833 +{
59834 + pci_unregister_driver(&pvscsi_pci_driver);
59835 +}
59836 +
59837 +module_init(pvscsi_init);
59838 +module_exit(pvscsi_exit);
59839 diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
59840 new file mode 100644
59841 index 0000000..62e36e7
59842 --- /dev/null
59843 +++ b/drivers/scsi/vmw_pvscsi.h
59844 @@ -0,0 +1,397 @@
59845 +/*
59846 + * VMware PVSCSI header file
59847 + *
59848 + * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
59849 + *
59850 + * This program is free software; you can redistribute it and/or modify it
59851 + * under the terms of the GNU General Public License as published by the
59852 + * Free Software Foundation; version 2 of the License and no later version.
59853 + *
59854 + * This program is distributed in the hope that it will be useful, but
59855 + * WITHOUT ANY WARRANTY; without even the implied warranty of
59856 + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
59857 + * NON INFRINGEMENT. See the GNU General Public License for more
59858 + * details.
59859 + *
59860 + * You should have received a copy of the GNU General Public License
59861 + * along with this program; if not, write to the Free Software
59862 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
59863 + *
59864 + * Maintained by: Alok N Kataria <akataria@vmware.com>
59865 + *
59866 + */
59867 +
59868 +#ifndef _VMW_PVSCSI_H_
59869 +#define _VMW_PVSCSI_H_
59870 +
59871 +#include <linux/types.h>
59872 +
59873 +#define PVSCSI_DRIVER_VERSION_STRING "1.0.1.0-k"
59874 +
59875 +#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
59876 +
59877 +#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
59878 +
59879 +#define PCI_VENDOR_ID_VMWARE 0x15AD
59880 +#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0
59881 +
59882 +/*
59883 + * host adapter status/error codes
59884 + */
59885 +enum HostBusAdapterStatus {
59886 + BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
59887 + BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a,
59888 + BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
59889 + BTSTAT_DATA_UNDERRUN = 0x0c,
59890 + BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
59891 + BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
59892 + BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
59893 + BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence requested by target */
59894 + BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from first CCB */
59895 + BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
59896 + BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message rejected by target */
59897 + BTSTAT_BADMSG = 0x1d, /* unsupported message received by the host adapter */
59898 + BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
59899 + BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, sent a SCSI RST */
59900 + BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
59901 + BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */
59902 + BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly (w/o tag) */
59903 + BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
59904 + BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
59905 + BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
59906 + BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
59907 + BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
59908 +};
59909 +
59910 +/*
59911 + * Register offsets.
59912 + *
59913 + * These registers are accessible both via i/o space and mm i/o.
59914 + */
59915 +
59916 +enum PVSCSIRegOffset {
59917 + PVSCSI_REG_OFFSET_COMMAND = 0x0,
59918 + PVSCSI_REG_OFFSET_COMMAND_DATA = 0x4,
59919 + PVSCSI_REG_OFFSET_COMMAND_STATUS = 0x8,
59920 + PVSCSI_REG_OFFSET_LAST_STS_0 = 0x100,
59921 + PVSCSI_REG_OFFSET_LAST_STS_1 = 0x104,
59922 + PVSCSI_REG_OFFSET_LAST_STS_2 = 0x108,
59923 + PVSCSI_REG_OFFSET_LAST_STS_3 = 0x10c,
59924 + PVSCSI_REG_OFFSET_INTR_STATUS = 0x100c,
59925 + PVSCSI_REG_OFFSET_INTR_MASK = 0x2010,
59926 + PVSCSI_REG_OFFSET_KICK_NON_RW_IO = 0x3014,
59927 + PVSCSI_REG_OFFSET_DEBUG = 0x3018,
59928 + PVSCSI_REG_OFFSET_KICK_RW_IO = 0x4018,
59929 +};
59930 +
59931 +/*
59932 + * Virtual h/w commands.
59933 + */
59934 +
59935 +enum PVSCSICommands {
59936 + PVSCSI_CMD_FIRST = 0, /* has to be first */
59937 +
59938 + PVSCSI_CMD_ADAPTER_RESET = 1,
59939 + PVSCSI_CMD_ISSUE_SCSI = 2,
59940 + PVSCSI_CMD_SETUP_RINGS = 3,
59941 + PVSCSI_CMD_RESET_BUS = 4,
59942 + PVSCSI_CMD_RESET_DEVICE = 5,
59943 + PVSCSI_CMD_ABORT_CMD = 6,
59944 + PVSCSI_CMD_CONFIG = 7,
59945 + PVSCSI_CMD_SETUP_MSG_RING = 8,
59946 + PVSCSI_CMD_DEVICE_UNPLUG = 9,
59947 +
59948 + PVSCSI_CMD_LAST = 10 /* has to be last */
59949 +};
59950 +
59951 +/*
59952 + * Command descriptor for PVSCSI_CMD_RESET_DEVICE --
59953 + */
59954 +
59955 +struct PVSCSICmdDescResetDevice {
59956 + u32 target;
59957 + u8 lun[8];
59958 +} __packed;
59959 +
59960 +/*
59961 + * Command descriptor for PVSCSI_CMD_ABORT_CMD --
59962 + *
59963 + * - currently does not support specifying the LUN.
59964 + * - _pad should be 0.
59965 + */
59966 +
59967 +struct PVSCSICmdDescAbortCmd {
59968 + u64 context;
59969 + u32 target;
59970 + u32 _pad;
59971 +} __packed;
59972 +
59973 +/*
59974 + * Command descriptor for PVSCSI_CMD_SETUP_RINGS --
59975 + *
59976 + * Notes:
59977 + * - reqRingNumPages and cmpRingNumPages need to be power of two.
59978 + * - reqRingNumPages and cmpRingNumPages need to be different from 0,
59979 + * - reqRingNumPages and cmpRingNumPages need to be inferior to
59980 + * PVSCSI_SETUP_RINGS_MAX_NUM_PAGES.
59981 + */
59982 +
59983 +#define PVSCSI_SETUP_RINGS_MAX_NUM_PAGES 32
59984 +struct PVSCSICmdDescSetupRings {
59985 + u32 reqRingNumPages;
59986 + u32 cmpRingNumPages;
59987 + u64 ringsStatePPN;
59988 + u64 reqRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
59989 + u64 cmpRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
59990 +} __packed;
59991 +
59992 +/*
59993 + * Command descriptor for PVSCSI_CMD_SETUP_MSG_RING --
59994 + *
59995 + * Notes:
59996 + * - this command was not supported in the initial revision of the h/w
59997 + * interface. Before using it, you need to check that it is supported by
59998 + * writing PVSCSI_CMD_SETUP_MSG_RING to the 'command' register, then
59999 + * immediately after read the 'command status' register:
60000 + * * a value of -1 means that the cmd is NOT supported,
60001 + * * a value != -1 means that the cmd IS supported.
60002 + * If it's supported the 'command status' register should return:
60003 + * sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(u32).
60004 + * - this command should be issued _after_ the usual SETUP_RINGS so that the
60005 + * RingsState page is already setup. If not, the command is a nop.
60006 + * - numPages needs to be a power of two,
60007 + * - numPages needs to be different from 0,
60008 + * - _pad should be zero.
60009 + */
60010 +
60011 +#define PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES 16
60012 +
60013 +struct PVSCSICmdDescSetupMsgRing {
60014 + u32 numPages;
60015 + u32 _pad;
60016 + u64 ringPPNs[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
60017 +} __packed;
60018 +
60019 +enum PVSCSIMsgType {
60020 + PVSCSI_MSG_DEV_ADDED = 0,
60021 + PVSCSI_MSG_DEV_REMOVED = 1,
60022 + PVSCSI_MSG_LAST = 2,
60023 +};
60024 +
60025 +/*
60026 + * Msg descriptor.
60027 + *
60028 + * sizeof(struct PVSCSIRingMsgDesc) == 128.
60029 + *
60030 + * - type is of type enum PVSCSIMsgType.
60031 + * - the content of args depend on the type of event being delivered.
60032 + */
60033 +
60034 +struct PVSCSIRingMsgDesc {
60035 + u32 type;
60036 + u32 args[31];
60037 +} __packed;
60038 +
60039 +struct PVSCSIMsgDescDevStatusChanged {
60040 + u32 type; /* PVSCSI_MSG_DEV _ADDED / _REMOVED */
60041 + u32 bus;
60042 + u32 target;
60043 + u8 lun[8];
60044 + u32 pad[27];
60045 +} __packed;
60046 +
60047 +/*
60048 + * Rings state.
60049 + *
60050 + * - the fields:
60051 + * . msgProdIdx,
60052 + * . msgConsIdx,
60053 + * . msgNumEntriesLog2,
60054 + * .. are only used once the SETUP_MSG_RING cmd has been issued.
60055 + * - '_pad' helps to ensure that the msg related fields are on their own
60056 + * cache-line.
60057 + */
60058 +
60059 +struct PVSCSIRingsState {
60060 + u32 reqProdIdx;
60061 + u32 reqConsIdx;
60062 + u32 reqNumEntriesLog2;
60063 +
60064 + u32 cmpProdIdx;
60065 + u32 cmpConsIdx;
60066 + u32 cmpNumEntriesLog2;
60067 +
60068 + u8 _pad[104];
60069 +
60070 + u32 msgProdIdx;
60071 + u32 msgConsIdx;
60072 + u32 msgNumEntriesLog2;
60073 +} __packed;
60074 +
60075 +/*
60076 + * Request descriptor.
60077 + *
60078 + * sizeof(RingReqDesc) = 128
60079 + *
60080 + * - context: is a unique identifier of a command. It could normally be any
60081 + * 64bit value, however we currently store it in the serialNumber variable
60082 + * of struct SCSI_Command, so we have the following restrictions due to the
60083 + * way this field is handled in the vmkernel storage stack:
60084 + * * this value can't be 0,
60085 + * * the upper 32bit need to be 0 since serialNumber is as a u32.
60086 + * Currently tracked as PR 292060.
60087 + * - dataLen: contains the total number of bytes that need to be transferred.
60088 + * - dataAddr:
60089 + * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is set: dataAddr is the PA of the first
60090 + * s/g table segment, each s/g segment is entirely contained on a single
60091 + * page of physical memory,
60092 + * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is NOT set, then dataAddr is the PA of
60093 + * the buffer used for the DMA transfer,
60094 + * - flags:
60095 + * * PVSCSI_FLAG_CMD_WITH_SG_LIST: see dataAddr above,
60096 + * * PVSCSI_FLAG_CMD_DIR_NONE: no DMA involved,
60097 + * * PVSCSI_FLAG_CMD_DIR_TOHOST: transfer from device to main memory,
60098 + * * PVSCSI_FLAG_CMD_DIR_TODEVICE: transfer from main memory to device,
60099 + * * PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB: reserved to handle CDBs larger than
60100 + * 16bytes. To be specified.
60101 + * - vcpuHint: vcpuId of the processor that will be most likely waiting for the
60102 + * completion of the i/o. For guest OSes that use lowest priority message
60103 + * delivery mode (such as windows), we use this "hint" to deliver the
60104 + * completion action to the proper vcpu. For now, we can use the vcpuId of
60105 + * the processor that initiated the i/o as a likely candidate for the vcpu
60106 + * that will be waiting for the completion..
60107 + * - bus should be 0: we currently only support bus 0 for now.
60108 + * - unused should be zero'd.
60109 + */
60110 +
60111 +#define PVSCSI_FLAG_CMD_WITH_SG_LIST (1 << 0)
60112 +#define PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB (1 << 1)
60113 +#define PVSCSI_FLAG_CMD_DIR_NONE (1 << 2)
60114 +#define PVSCSI_FLAG_CMD_DIR_TOHOST (1 << 3)
60115 +#define PVSCSI_FLAG_CMD_DIR_TODEVICE (1 << 4)
60116 +
60117 +struct PVSCSIRingReqDesc {
60118 + u64 context;
60119 + u64 dataAddr;
60120 + u64 dataLen;
60121 + u64 senseAddr;
60122 + u32 senseLen;
60123 + u32 flags;
60124 + u8 cdb[16];
60125 + u8 cdbLen;
60126 + u8 lun[8];
60127 + u8 tag;
60128 + u8 bus;
60129 + u8 target;
60130 + u8 vcpuHint;
60131 + u8 unused[59];
60132 +} __packed;
60133 +
60134 +/*
60135 + * Scatter-gather list management.
60136 + *
60137 + * As described above, when PVSCSI_FLAG_CMD_WITH_SG_LIST is set in the
60138 + * RingReqDesc.flags, then RingReqDesc.dataAddr is the PA of the first s/g
60139 + * table segment.
60140 + *
60141 + * - each segment of the s/g table contain a succession of struct
60142 + * PVSCSISGElement.
60143 + * - each segment is entirely contained on a single physical page of memory.
60144 + * - a "chain" s/g element has the flag PVSCSI_SGE_FLAG_CHAIN_ELEMENT set in
60145 + * PVSCSISGElement.flags and in this case:
60146 + * * addr is the PA of the next s/g segment,
60147 + * * length is undefined, assumed to be 0.
60148 + */
60149 +
60150 +struct PVSCSISGElement {
60151 + u64 addr;
60152 + u32 length;
60153 + u32 flags;
60154 +} __packed;
60155 +
60156 +/*
60157 + * Completion descriptor.
60158 + *
60159 + * sizeof(RingCmpDesc) = 32
60160 + *
60161 + * - context: identifier of the command. The same thing that was specified
60162 + * under "context" as part of struct RingReqDesc at initiation time,
60163 + * - dataLen: number of bytes transferred for the actual i/o operation,
60164 + * - senseLen: number of bytes written into the sense buffer,
60165 + * - hostStatus: adapter status,
60166 + * - scsiStatus: device status,
60167 + * - _pad should be zero.
60168 + */
60169 +
60170 +struct PVSCSIRingCmpDesc {
60171 + u64 context;
60172 + u64 dataLen;
60173 + u32 senseLen;
60174 + u16 hostStatus;
60175 + u16 scsiStatus;
60176 + u32 _pad[2];
60177 +} __packed;
60178 +
60179 +/*
60180 + * Interrupt status / IRQ bits.
60181 + */
60182 +
60183 +#define PVSCSI_INTR_CMPL_0 (1 << 0)
60184 +#define PVSCSI_INTR_CMPL_1 (1 << 1)
60185 +#define PVSCSI_INTR_CMPL_MASK MASK(2)
60186 +
60187 +#define PVSCSI_INTR_MSG_0 (1 << 2)
60188 +#define PVSCSI_INTR_MSG_1 (1 << 3)
60189 +#define PVSCSI_INTR_MSG_MASK (MASK(2) << 2)
60190 +
60191 +#define PVSCSI_INTR_ALL_SUPPORTED MASK(4)
60192 +
60193 +/*
60194 + * Number of MSI-X vectors supported.
60195 + */
60196 +#define PVSCSI_MAX_INTRS 24
60197 +
60198 +/*
60199 + * Enumeration of supported MSI-X vectors
60200 + */
60201 +#define PVSCSI_VECTOR_COMPLETION 0
60202 +
60203 +/*
60204 + * Misc constants for the rings.
60205 + */
60206 +
60207 +#define PVSCSI_MAX_NUM_PAGES_REQ_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
60208 +#define PVSCSI_MAX_NUM_PAGES_CMP_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
60209 +#define PVSCSI_MAX_NUM_PAGES_MSG_RING PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES
60210 +
60211 +#define PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE \
60212 + (PAGE_SIZE / sizeof(struct PVSCSIRingReqDesc))
60213 +
60214 +#define PVSCSI_MAX_REQ_QUEUE_DEPTH \
60215 + (PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE)
60216 +
60217 +#define PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES 1
60218 +#define PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES 1
60219 +#define PVSCSI_MEM_SPACE_MISC_NUM_PAGES 2
60220 +#define PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES 2
60221 +#define PVSCSI_MEM_SPACE_MSIX_NUM_PAGES 2
60222 +
60223 +enum PVSCSIMemSpace {
60224 + PVSCSI_MEM_SPACE_COMMAND_PAGE = 0,
60225 + PVSCSI_MEM_SPACE_INTR_STATUS_PAGE = 1,
60226 + PVSCSI_MEM_SPACE_MISC_PAGE = 2,
60227 + PVSCSI_MEM_SPACE_KICK_IO_PAGE = 4,
60228 + PVSCSI_MEM_SPACE_MSIX_TABLE_PAGE = 6,
60229 + PVSCSI_MEM_SPACE_MSIX_PBA_PAGE = 7,
60230 +};
60231 +
60232 +#define PVSCSI_MEM_SPACE_NUM_PAGES \
60233 + (PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES + \
60234 + PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES + \
60235 + PVSCSI_MEM_SPACE_MISC_NUM_PAGES + \
60236 + PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES + \
60237 + PVSCSI_MEM_SPACE_MSIX_NUM_PAGES)
60238 +
60239 +#define PVSCSI_MEM_SPACE_SIZE (PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE)
60240 +
60241 +#endif /* _VMW_PVSCSI_H_ */
60242 diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
60243 index eadc1ab..2d81457 100644
60244 --- a/drivers/serial/kgdboc.c
60245 +++ b/drivers/serial/kgdboc.c
60246 @@ -18,7 +18,7 @@
60247
60248 #define MAX_CONFIG_LEN 40
60249
60250 -static struct kgdb_io kgdboc_io_ops;
60251 +static const struct kgdb_io kgdboc_io_ops;
60252
60253 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
60254 static int configured = -1;
60255 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
60256 module_put(THIS_MODULE);
60257 }
60258
60259 -static struct kgdb_io kgdboc_io_ops = {
60260 +static const struct kgdb_io kgdboc_io_ops = {
60261 .name = "kgdboc",
60262 .read_char = kgdboc_get_char,
60263 .write_char = kgdboc_put_char,
60264 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
60265 index b76f246..7f41af7 100644
60266 --- a/drivers/spi/spi.c
60267 +++ b/drivers/spi/spi.c
60268 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
60269 EXPORT_SYMBOL_GPL(spi_sync);
60270
60271 /* portable code must never pass more than 32 bytes */
60272 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
60273 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
60274
60275 static u8 *buf;
60276
60277 diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
60278 index b9b37ff..19dfa23 100644
60279 --- a/drivers/staging/android/binder.c
60280 +++ b/drivers/staging/android/binder.c
60281 @@ -2761,7 +2761,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
60282 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
60283 }
60284
60285 -static struct vm_operations_struct binder_vm_ops = {
60286 +static const struct vm_operations_struct binder_vm_ops = {
60287 .open = binder_vma_open,
60288 .close = binder_vma_close,
60289 };
60290 diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
60291 index cda26bb..39fed3f 100644
60292 --- a/drivers/staging/b3dfg/b3dfg.c
60293 +++ b/drivers/staging/b3dfg/b3dfg.c
60294 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
60295 return VM_FAULT_NOPAGE;
60296 }
60297
60298 -static struct vm_operations_struct b3dfg_vm_ops = {
60299 +static const struct vm_operations_struct b3dfg_vm_ops = {
60300 .fault = b3dfg_vma_fault,
60301 };
60302
60303 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
60304 return r;
60305 }
60306
60307 -static struct file_operations b3dfg_fops = {
60308 +static const struct file_operations b3dfg_fops = {
60309 .owner = THIS_MODULE,
60310 .open = b3dfg_open,
60311 .release = b3dfg_release,
60312 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
60313 index 908f25a..c9a579b 100644
60314 --- a/drivers/staging/comedi/comedi_fops.c
60315 +++ b/drivers/staging/comedi/comedi_fops.c
60316 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
60317 mutex_unlock(&dev->mutex);
60318 }
60319
60320 -static struct vm_operations_struct comedi_vm_ops = {
60321 +static const struct vm_operations_struct comedi_vm_ops = {
60322 .close = comedi_unmap,
60323 };
60324
60325 diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
60326 index e55a0db..577b776 100644
60327 --- a/drivers/staging/dream/qdsp5/adsp_driver.c
60328 +++ b/drivers/staging/dream/qdsp5/adsp_driver.c
60329 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
60330 static dev_t adsp_devno;
60331 static struct class *adsp_class;
60332
60333 -static struct file_operations adsp_fops = {
60334 +static const struct file_operations adsp_fops = {
60335 .owner = THIS_MODULE,
60336 .open = adsp_open,
60337 .unlocked_ioctl = adsp_ioctl,
60338 diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
60339 index ad2390f..4116ee8 100644
60340 --- a/drivers/staging/dream/qdsp5/audio_aac.c
60341 +++ b/drivers/staging/dream/qdsp5/audio_aac.c
60342 @@ -1022,7 +1022,7 @@ done:
60343 return rc;
60344 }
60345
60346 -static struct file_operations audio_aac_fops = {
60347 +static const struct file_operations audio_aac_fops = {
60348 .owner = THIS_MODULE,
60349 .open = audio_open,
60350 .release = audio_release,
60351 diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
60352 index cd818a5..870b37b 100644
60353 --- a/drivers/staging/dream/qdsp5/audio_amrnb.c
60354 +++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
60355 @@ -833,7 +833,7 @@ done:
60356 return rc;
60357 }
60358
60359 -static struct file_operations audio_amrnb_fops = {
60360 +static const struct file_operations audio_amrnb_fops = {
60361 .owner = THIS_MODULE,
60362 .open = audamrnb_open,
60363 .release = audamrnb_release,
60364 diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
60365 index 4b43e18..cedafda 100644
60366 --- a/drivers/staging/dream/qdsp5/audio_evrc.c
60367 +++ b/drivers/staging/dream/qdsp5/audio_evrc.c
60368 @@ -805,7 +805,7 @@ dma_fail:
60369 return rc;
60370 }
60371
60372 -static struct file_operations audio_evrc_fops = {
60373 +static const struct file_operations audio_evrc_fops = {
60374 .owner = THIS_MODULE,
60375 .open = audevrc_open,
60376 .release = audevrc_release,
60377 diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
60378 index 3d950a2..9431118 100644
60379 --- a/drivers/staging/dream/qdsp5/audio_in.c
60380 +++ b/drivers/staging/dream/qdsp5/audio_in.c
60381 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
60382 return 0;
60383 }
60384
60385 -static struct file_operations audio_fops = {
60386 +static const struct file_operations audio_fops = {
60387 .owner = THIS_MODULE,
60388 .open = audio_in_open,
60389 .release = audio_in_release,
60390 @@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
60391 .unlocked_ioctl = audio_in_ioctl,
60392 };
60393
60394 -static struct file_operations audpre_fops = {
60395 +static const struct file_operations audpre_fops = {
60396 .owner = THIS_MODULE,
60397 .open = audpre_open,
60398 .unlocked_ioctl = audpre_ioctl,
60399 diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
60400 index b95574f..286c2f4 100644
60401 --- a/drivers/staging/dream/qdsp5/audio_mp3.c
60402 +++ b/drivers/staging/dream/qdsp5/audio_mp3.c
60403 @@ -941,7 +941,7 @@ done:
60404 return rc;
60405 }
60406
60407 -static struct file_operations audio_mp3_fops = {
60408 +static const struct file_operations audio_mp3_fops = {
60409 .owner = THIS_MODULE,
60410 .open = audio_open,
60411 .release = audio_release,
60412 diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
60413 index d1adcf6..f8f9833 100644
60414 --- a/drivers/staging/dream/qdsp5/audio_out.c
60415 +++ b/drivers/staging/dream/qdsp5/audio_out.c
60416 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
60417 return 0;
60418 }
60419
60420 -static struct file_operations audio_fops = {
60421 +static const struct file_operations audio_fops = {
60422 .owner = THIS_MODULE,
60423 .open = audio_open,
60424 .release = audio_release,
60425 @@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
60426 .unlocked_ioctl = audio_ioctl,
60427 };
60428
60429 -static struct file_operations audpp_fops = {
60430 +static const struct file_operations audpp_fops = {
60431 .owner = THIS_MODULE,
60432 .open = audpp_open,
60433 .unlocked_ioctl = audpp_ioctl,
60434 diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
60435 index f0f50e3..f6b9dbc 100644
60436 --- a/drivers/staging/dream/qdsp5/audio_qcelp.c
60437 +++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
60438 @@ -816,7 +816,7 @@ err:
60439 return rc;
60440 }
60441
60442 -static struct file_operations audio_qcelp_fops = {
60443 +static const struct file_operations audio_qcelp_fops = {
60444 .owner = THIS_MODULE,
60445 .open = audqcelp_open,
60446 .release = audqcelp_release,
60447 diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
60448 index 037d7ff..5469ec3 100644
60449 --- a/drivers/staging/dream/qdsp5/snd.c
60450 +++ b/drivers/staging/dream/qdsp5/snd.c
60451 @@ -242,7 +242,7 @@ err:
60452 return rc;
60453 }
60454
60455 -static struct file_operations snd_fops = {
60456 +static const struct file_operations snd_fops = {
60457 .owner = THIS_MODULE,
60458 .open = snd_open,
60459 .release = snd_release,
60460 diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
60461 index d4e7d88..0ea632a 100644
60462 --- a/drivers/staging/dream/smd/smd_qmi.c
60463 +++ b/drivers/staging/dream/smd/smd_qmi.c
60464 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
60465 return 0;
60466 }
60467
60468 -static struct file_operations qmi_fops = {
60469 +static const struct file_operations qmi_fops = {
60470 .owner = THIS_MODULE,
60471 .read = qmi_read,
60472 .write = qmi_write,
60473 diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
60474 index cd3910b..ff053d3 100644
60475 --- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
60476 +++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
60477 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
60478 return rc;
60479 }
60480
60481 -static struct file_operations rpcrouter_server_fops = {
60482 +static const struct file_operations rpcrouter_server_fops = {
60483 .owner = THIS_MODULE,
60484 .open = rpcrouter_open,
60485 .release = rpcrouter_release,
60486 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
60487 .unlocked_ioctl = rpcrouter_ioctl,
60488 };
60489
60490 -static struct file_operations rpcrouter_router_fops = {
60491 +static const struct file_operations rpcrouter_router_fops = {
60492 .owner = THIS_MODULE,
60493 .open = rpcrouter_open,
60494 .release = rpcrouter_release,
60495 diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
60496 index c24e4e0..07665be 100644
60497 --- a/drivers/staging/dst/dcore.c
60498 +++ b/drivers/staging/dst/dcore.c
60499 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
60500 return 0;
60501 }
60502
60503 -static struct block_device_operations dst_blk_ops = {
60504 +static const struct block_device_operations dst_blk_ops = {
60505 .open = dst_bdev_open,
60506 .release = dst_bdev_release,
60507 .owner = THIS_MODULE,
60508 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
60509 n->size = ctl->size;
60510
60511 atomic_set(&n->refcnt, 1);
60512 - atomic_long_set(&n->gen, 0);
60513 + atomic_long_set_unchecked(&n->gen, 0);
60514 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
60515
60516 err = dst_node_sysfs_init(n);
60517 diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
60518 index 557d372..8d84422 100644
60519 --- a/drivers/staging/dst/trans.c
60520 +++ b/drivers/staging/dst/trans.c
60521 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
60522 t->error = 0;
60523 t->retries = 0;
60524 atomic_set(&t->refcnt, 1);
60525 - t->gen = atomic_long_inc_return(&n->gen);
60526 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
60527
60528 t->enc = bio_data_dir(bio);
60529 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
60530 diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
60531 index 94f7752..d051514 100644
60532 --- a/drivers/staging/et131x/et1310_tx.c
60533 +++ b/drivers/staging/et131x/et1310_tx.c
60534 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
60535 struct net_device_stats *stats = &etdev->net_stats;
60536
60537 if (pMpTcb->Flags & fMP_DEST_BROAD)
60538 - atomic_inc(&etdev->Stats.brdcstxmt);
60539 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
60540 else if (pMpTcb->Flags & fMP_DEST_MULTI)
60541 - atomic_inc(&etdev->Stats.multixmt);
60542 + atomic_inc_unchecked(&etdev->Stats.multixmt);
60543 else
60544 - atomic_inc(&etdev->Stats.unixmt);
60545 + atomic_inc_unchecked(&etdev->Stats.unixmt);
60546
60547 if (pMpTcb->Packet) {
60548 stats->tx_bytes += pMpTcb->Packet->len;
60549 diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
60550 index 1dfe06f..f469b4d 100644
60551 --- a/drivers/staging/et131x/et131x_adapter.h
60552 +++ b/drivers/staging/et131x/et131x_adapter.h
60553 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
60554 * operations
60555 */
60556 u32 unircv; /* # multicast packets received */
60557 - atomic_t unixmt; /* # multicast packets for Tx */
60558 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
60559 u32 multircv; /* # multicast packets received */
60560 - atomic_t multixmt; /* # multicast packets for Tx */
60561 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
60562 u32 brdcstrcv; /* # broadcast packets received */
60563 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
60564 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
60565 u32 norcvbuf; /* # Rx packets discarded */
60566 u32 noxmtbuf; /* # Tx packets discarded */
60567
60568 diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
60569 index 4bd353a..e28f455 100644
60570 --- a/drivers/staging/go7007/go7007-v4l2.c
60571 +++ b/drivers/staging/go7007/go7007-v4l2.c
60572 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
60573 return 0;
60574 }
60575
60576 -static struct vm_operations_struct go7007_vm_ops = {
60577 +static const struct vm_operations_struct go7007_vm_ops = {
60578 .open = go7007_vm_open,
60579 .close = go7007_vm_close,
60580 .fault = go7007_vm_fault,
60581 diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
60582 index 366dc95..b974d87 100644
60583 --- a/drivers/staging/hv/Channel.c
60584 +++ b/drivers/staging/hv/Channel.c
60585 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
60586
60587 DPRINT_ENTER(VMBUS);
60588
60589 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
60590 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
60591 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
60592 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
60593
60594 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
60595 ASSERT(msgInfo != NULL);
60596 diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
60597 index b12237f..01ae28a 100644
60598 --- a/drivers/staging/hv/Hv.c
60599 +++ b/drivers/staging/hv/Hv.c
60600 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
60601 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
60602 u32 outputAddressHi = outputAddress >> 32;
60603 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
60604 - volatile void *hypercallPage = gHvContext.HypercallPage;
60605 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
60606
60607 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
60608 Control, Input, Output);
60609 diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
60610 index d089bb1..2ebc158 100644
60611 --- a/drivers/staging/hv/VmbusApi.h
60612 +++ b/drivers/staging/hv/VmbusApi.h
60613 @@ -109,7 +109,7 @@ struct vmbus_channel_interface {
60614 u32 *GpadlHandle);
60615 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
60616 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
60617 -};
60618 +} __no_const;
60619
60620 /* Base driver object */
60621 struct hv_driver {
60622 diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
60623 index 5a37cce..6ecc88c 100644
60624 --- a/drivers/staging/hv/VmbusPrivate.h
60625 +++ b/drivers/staging/hv/VmbusPrivate.h
60626 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
60627 struct VMBUS_CONNECTION {
60628 enum VMBUS_CONNECT_STATE ConnectState;
60629
60630 - atomic_t NextGpadlHandle;
60631 + atomic_unchecked_t NextGpadlHandle;
60632
60633 /*
60634 * Represents channel interrupts. Each bit position represents a
60635 diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
60636 index 871a202..ca50ddf 100644
60637 --- a/drivers/staging/hv/blkvsc_drv.c
60638 +++ b/drivers/staging/hv/blkvsc_drv.c
60639 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
60640 /* The one and only one */
60641 static struct blkvsc_driver_context g_blkvsc_drv;
60642
60643 -static struct block_device_operations block_ops = {
60644 +static const struct block_device_operations block_ops = {
60645 .owner = THIS_MODULE,
60646 .open = blkvsc_open,
60647 .release = blkvsc_release,
60648 diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
60649 index 6acc49a..fbc8d46 100644
60650 --- a/drivers/staging/hv/vmbus_drv.c
60651 +++ b/drivers/staging/hv/vmbus_drv.c
60652 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
60653 to_device_context(root_device_obj);
60654 struct device_context *child_device_ctx =
60655 to_device_context(child_device_obj);
60656 - static atomic_t device_num = ATOMIC_INIT(0);
60657 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
60658
60659 DPRINT_ENTER(VMBUS_DRV);
60660
60661 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
60662
60663 /* Set the device name. Otherwise, device_register() will fail. */
60664 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
60665 - atomic_inc_return(&device_num));
60666 + atomic_inc_return_unchecked(&device_num));
60667
60668 /* The new device belongs to this bus */
60669 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
60670 diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
60671 index d926189..17b19fd 100644
60672 --- a/drivers/staging/iio/ring_generic.h
60673 +++ b/drivers/staging/iio/ring_generic.h
60674 @@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
60675
60676 int (*is_enabled)(struct iio_ring_buffer *ring);
60677 int (*enable)(struct iio_ring_buffer *ring);
60678 -};
60679 +} __no_const;
60680
60681 /**
60682 * struct iio_ring_buffer - general ring buffer structure
60683 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
60684 index 1b237b7..88c624e 100644
60685 --- a/drivers/staging/octeon/ethernet-rx.c
60686 +++ b/drivers/staging/octeon/ethernet-rx.c
60687 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
60688 /* Increment RX stats for virtual ports */
60689 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
60690 #ifdef CONFIG_64BIT
60691 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
60692 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
60693 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
60694 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
60695 #else
60696 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
60697 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
60698 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
60699 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
60700 #endif
60701 }
60702 netif_receive_skb(skb);
60703 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
60704 dev->name);
60705 */
60706 #ifdef CONFIG_64BIT
60707 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
60708 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
60709 #else
60710 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
60711 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
60712 #endif
60713 dev_kfree_skb_irq(skb);
60714 }
60715 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
60716 index 492c502..d9909f1 100644
60717 --- a/drivers/staging/octeon/ethernet.c
60718 +++ b/drivers/staging/octeon/ethernet.c
60719 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
60720 * since the RX tasklet also increments it.
60721 */
60722 #ifdef CONFIG_64BIT
60723 - atomic64_add(rx_status.dropped_packets,
60724 - (atomic64_t *)&priv->stats.rx_dropped);
60725 + atomic64_add_unchecked(rx_status.dropped_packets,
60726 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
60727 #else
60728 - atomic_add(rx_status.dropped_packets,
60729 - (atomic_t *)&priv->stats.rx_dropped);
60730 + atomic_add_unchecked(rx_status.dropped_packets,
60731 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
60732 #endif
60733 }
60734
60735 diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
60736 index a35bd5d..28fff45 100644
60737 --- a/drivers/staging/otus/80211core/pub_zfi.h
60738 +++ b/drivers/staging/otus/80211core/pub_zfi.h
60739 @@ -531,7 +531,7 @@ struct zsCbFuncTbl
60740 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
60741
60742 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
60743 -};
60744 +} __no_const;
60745
60746 extern void zfZeroMemory(u8_t* va, u16_t length);
60747 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
60748 diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
60749 index c39a25f..696f5aa 100644
60750 --- a/drivers/staging/panel/panel.c
60751 +++ b/drivers/staging/panel/panel.c
60752 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
60753 return 0;
60754 }
60755
60756 -static struct file_operations lcd_fops = {
60757 +static const struct file_operations lcd_fops = {
60758 .write = lcd_write,
60759 .open = lcd_open,
60760 .release = lcd_release,
60761 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
60762 return 0;
60763 }
60764
60765 -static struct file_operations keypad_fops = {
60766 +static const struct file_operations keypad_fops = {
60767 .read = keypad_read, /* read */
60768 .open = keypad_open, /* open */
60769 .release = keypad_release, /* close */
60770 diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
60771 index 270ebcb..37e46af 100644
60772 --- a/drivers/staging/phison/phison.c
60773 +++ b/drivers/staging/phison/phison.c
60774 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
60775 ATA_BMDMA_SHT(DRV_NAME),
60776 };
60777
60778 -static struct ata_port_operations phison_ops = {
60779 +static const struct ata_port_operations phison_ops = {
60780 .inherits = &ata_bmdma_port_ops,
60781 .prereset = phison_pre_reset,
60782 };
60783 diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
60784 index 2eb8e3d..57616a7 100644
60785 --- a/drivers/staging/poch/poch.c
60786 +++ b/drivers/staging/poch/poch.c
60787 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
60788 return 0;
60789 }
60790
60791 -static struct file_operations poch_fops = {
60792 +static const struct file_operations poch_fops = {
60793 .owner = THIS_MODULE,
60794 .open = poch_open,
60795 .release = poch_release,
60796 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
60797 index c94de31..19402bc 100644
60798 --- a/drivers/staging/pohmelfs/inode.c
60799 +++ b/drivers/staging/pohmelfs/inode.c
60800 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
60801 mutex_init(&psb->mcache_lock);
60802 psb->mcache_root = RB_ROOT;
60803 psb->mcache_timeout = msecs_to_jiffies(5000);
60804 - atomic_long_set(&psb->mcache_gen, 0);
60805 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
60806
60807 psb->trans_max_pages = 100;
60808
60809 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
60810 INIT_LIST_HEAD(&psb->crypto_ready_list);
60811 INIT_LIST_HEAD(&psb->crypto_active_list);
60812
60813 - atomic_set(&psb->trans_gen, 1);
60814 + atomic_set_unchecked(&psb->trans_gen, 1);
60815 atomic_long_set(&psb->total_inodes, 0);
60816
60817 mutex_init(&psb->state_lock);
60818 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
60819 index e22665c..a2a9390 100644
60820 --- a/drivers/staging/pohmelfs/mcache.c
60821 +++ b/drivers/staging/pohmelfs/mcache.c
60822 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
60823 m->data = data;
60824 m->start = start;
60825 m->size = size;
60826 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
60827 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
60828
60829 mutex_lock(&psb->mcache_lock);
60830 err = pohmelfs_mcache_insert(psb, m);
60831 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
60832 index 623a07d..4035c19 100644
60833 --- a/drivers/staging/pohmelfs/netfs.h
60834 +++ b/drivers/staging/pohmelfs/netfs.h
60835 @@ -570,14 +570,14 @@ struct pohmelfs_config;
60836 struct pohmelfs_sb {
60837 struct rb_root mcache_root;
60838 struct mutex mcache_lock;
60839 - atomic_long_t mcache_gen;
60840 + atomic_long_unchecked_t mcache_gen;
60841 unsigned long mcache_timeout;
60842
60843 unsigned int idx;
60844
60845 unsigned int trans_retries;
60846
60847 - atomic_t trans_gen;
60848 + atomic_unchecked_t trans_gen;
60849
60850 unsigned int crypto_attached_size;
60851 unsigned int crypto_align_size;
60852 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
60853 index 36a2535..0591bf4 100644
60854 --- a/drivers/staging/pohmelfs/trans.c
60855 +++ b/drivers/staging/pohmelfs/trans.c
60856 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
60857 int err;
60858 struct netfs_cmd *cmd = t->iovec.iov_base;
60859
60860 - t->gen = atomic_inc_return(&psb->trans_gen);
60861 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
60862
60863 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
60864 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
60865 diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
60866 index f890a16..509ece8 100644
60867 --- a/drivers/staging/sep/sep_driver.c
60868 +++ b/drivers/staging/sep/sep_driver.c
60869 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
60870 static dev_t sep_devno;
60871
60872 /* the files operations structure of the driver */
60873 -static struct file_operations sep_file_operations = {
60874 +static const struct file_operations sep_file_operations = {
60875 .owner = THIS_MODULE,
60876 .ioctl = sep_ioctl,
60877 .poll = sep_poll,
60878 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
60879 index 5e16bc3..7655b10 100644
60880 --- a/drivers/staging/usbip/usbip_common.h
60881 +++ b/drivers/staging/usbip/usbip_common.h
60882 @@ -374,7 +374,7 @@ struct usbip_device {
60883 void (*shutdown)(struct usbip_device *);
60884 void (*reset)(struct usbip_device *);
60885 void (*unusable)(struct usbip_device *);
60886 - } eh_ops;
60887 + } __no_const eh_ops;
60888 };
60889
60890
60891 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
60892 index 57f7946..d9df23d 100644
60893 --- a/drivers/staging/usbip/vhci.h
60894 +++ b/drivers/staging/usbip/vhci.h
60895 @@ -92,7 +92,7 @@ struct vhci_hcd {
60896 unsigned resuming:1;
60897 unsigned long re_timeout;
60898
60899 - atomic_t seqnum;
60900 + atomic_unchecked_t seqnum;
60901
60902 /*
60903 * NOTE:
60904 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
60905 index 20cd7db..c2693ff 100644
60906 --- a/drivers/staging/usbip/vhci_hcd.c
60907 +++ b/drivers/staging/usbip/vhci_hcd.c
60908 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
60909 return;
60910 }
60911
60912 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
60913 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
60914 if (priv->seqnum == 0xffff)
60915 usbip_uinfo("seqnum max\n");
60916
60917 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
60918 return -ENOMEM;
60919 }
60920
60921 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
60922 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
60923 if (unlink->seqnum == 0xffff)
60924 usbip_uinfo("seqnum max\n");
60925
60926 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
60927 vdev->rhport = rhport;
60928 }
60929
60930 - atomic_set(&vhci->seqnum, 0);
60931 + atomic_set_unchecked(&vhci->seqnum, 0);
60932 spin_lock_init(&vhci->lock);
60933
60934
60935 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
60936 index 7fd76fe..673695a 100644
60937 --- a/drivers/staging/usbip/vhci_rx.c
60938 +++ b/drivers/staging/usbip/vhci_rx.c
60939 @@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
60940 usbip_uerr("cannot find a urb of seqnum %u\n",
60941 pdu->base.seqnum);
60942 usbip_uinfo("max seqnum %d\n",
60943 - atomic_read(&the_controller->seqnum));
60944 + atomic_read_unchecked(&the_controller->seqnum));
60945 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
60946 return;
60947 }
60948 diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
60949 index 7891288..8e31300 100644
60950 --- a/drivers/staging/vme/devices/vme_user.c
60951 +++ b/drivers/staging/vme/devices/vme_user.c
60952 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
60953 static int __init vme_user_probe(struct device *, int, int);
60954 static int __exit vme_user_remove(struct device *, int, int);
60955
60956 -static struct file_operations vme_user_fops = {
60957 +static const struct file_operations vme_user_fops = {
60958 .open = vme_user_open,
60959 .release = vme_user_release,
60960 .read = vme_user_read,
60961 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
60962 index 58abf44..00c1fc8 100644
60963 --- a/drivers/staging/vt6655/hostap.c
60964 +++ b/drivers/staging/vt6655/hostap.c
60965 @@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
60966 PSDevice apdev_priv;
60967 struct net_device *dev = pDevice->dev;
60968 int ret;
60969 - const struct net_device_ops apdev_netdev_ops = {
60970 + net_device_ops_no_const apdev_netdev_ops = {
60971 .ndo_start_xmit = pDevice->tx_80211,
60972 };
60973
60974 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
60975 index 0c8267a..db1f363 100644
60976 --- a/drivers/staging/vt6656/hostap.c
60977 +++ b/drivers/staging/vt6656/hostap.c
60978 @@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
60979 PSDevice apdev_priv;
60980 struct net_device *dev = pDevice->dev;
60981 int ret;
60982 - const struct net_device_ops apdev_netdev_ops = {
60983 + net_device_ops_no_const apdev_netdev_ops = {
60984 .ndo_start_xmit = pDevice->tx_80211,
60985 };
60986
60987 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
60988 index 925678b..da7f5ed 100644
60989 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
60990 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
60991 @@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
60992
60993 struct usbctlx_completor {
60994 int (*complete) (struct usbctlx_completor *);
60995 -};
60996 +} __no_const;
60997 typedef struct usbctlx_completor usbctlx_completor_t;
60998
60999 static int
61000 diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
61001 index 40de151..924f268 100644
61002 --- a/drivers/telephony/ixj.c
61003 +++ b/drivers/telephony/ixj.c
61004 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
61005 bool mContinue;
61006 char *pIn, *pOut;
61007
61008 + pax_track_stack();
61009 +
61010 if (!SCI_Prepare(j))
61011 return 0;
61012
61013 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
61014 index e941367..b631f5a 100644
61015 --- a/drivers/uio/uio.c
61016 +++ b/drivers/uio/uio.c
61017 @@ -23,6 +23,7 @@
61018 #include <linux/string.h>
61019 #include <linux/kobject.h>
61020 #include <linux/uio_driver.h>
61021 +#include <asm/local.h>
61022
61023 #define UIO_MAX_DEVICES 255
61024
61025 @@ -30,10 +31,10 @@ struct uio_device {
61026 struct module *owner;
61027 struct device *dev;
61028 int minor;
61029 - atomic_t event;
61030 + atomic_unchecked_t event;
61031 struct fasync_struct *async_queue;
61032 wait_queue_head_t wait;
61033 - int vma_count;
61034 + local_t vma_count;
61035 struct uio_info *info;
61036 struct kobject *map_dir;
61037 struct kobject *portio_dir;
61038 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
61039 return entry->show(mem, buf);
61040 }
61041
61042 -static struct sysfs_ops map_sysfs_ops = {
61043 +static const struct sysfs_ops map_sysfs_ops = {
61044 .show = map_type_show,
61045 };
61046
61047 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
61048 return entry->show(port, buf);
61049 }
61050
61051 -static struct sysfs_ops portio_sysfs_ops = {
61052 +static const struct sysfs_ops portio_sysfs_ops = {
61053 .show = portio_type_show,
61054 };
61055
61056 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
61057 struct uio_device *idev = dev_get_drvdata(dev);
61058 if (idev)
61059 return sprintf(buf, "%u\n",
61060 - (unsigned int)atomic_read(&idev->event));
61061 + (unsigned int)atomic_read_unchecked(&idev->event));
61062 else
61063 return -ENODEV;
61064 }
61065 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
61066 {
61067 struct uio_device *idev = info->uio_dev;
61068
61069 - atomic_inc(&idev->event);
61070 + atomic_inc_unchecked(&idev->event);
61071 wake_up_interruptible(&idev->wait);
61072 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
61073 }
61074 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
61075 }
61076
61077 listener->dev = idev;
61078 - listener->event_count = atomic_read(&idev->event);
61079 + listener->event_count = atomic_read_unchecked(&idev->event);
61080 filep->private_data = listener;
61081
61082 if (idev->info->open) {
61083 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
61084 return -EIO;
61085
61086 poll_wait(filep, &idev->wait, wait);
61087 - if (listener->event_count != atomic_read(&idev->event))
61088 + if (listener->event_count != atomic_read_unchecked(&idev->event))
61089 return POLLIN | POLLRDNORM;
61090 return 0;
61091 }
61092 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
61093 do {
61094 set_current_state(TASK_INTERRUPTIBLE);
61095
61096 - event_count = atomic_read(&idev->event);
61097 + event_count = atomic_read_unchecked(&idev->event);
61098 if (event_count != listener->event_count) {
61099 if (copy_to_user(buf, &event_count, count))
61100 retval = -EFAULT;
61101 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
61102 static void uio_vma_open(struct vm_area_struct *vma)
61103 {
61104 struct uio_device *idev = vma->vm_private_data;
61105 - idev->vma_count++;
61106 + local_inc(&idev->vma_count);
61107 }
61108
61109 static void uio_vma_close(struct vm_area_struct *vma)
61110 {
61111 struct uio_device *idev = vma->vm_private_data;
61112 - idev->vma_count--;
61113 + local_dec(&idev->vma_count);
61114 }
61115
61116 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
61117 @@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
61118 idev->owner = owner;
61119 idev->info = info;
61120 init_waitqueue_head(&idev->wait);
61121 - atomic_set(&idev->event, 0);
61122 + atomic_set_unchecked(&idev->event, 0);
61123
61124 ret = uio_get_minor(idev);
61125 if (ret)
61126 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
61127 index fbea856..06efea6 100644
61128 --- a/drivers/usb/atm/usbatm.c
61129 +++ b/drivers/usb/atm/usbatm.c
61130 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61131 if (printk_ratelimit())
61132 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
61133 __func__, vpi, vci);
61134 - atomic_inc(&vcc->stats->rx_err);
61135 + atomic_inc_unchecked(&vcc->stats->rx_err);
61136 return;
61137 }
61138
61139 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61140 if (length > ATM_MAX_AAL5_PDU) {
61141 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
61142 __func__, length, vcc);
61143 - atomic_inc(&vcc->stats->rx_err);
61144 + atomic_inc_unchecked(&vcc->stats->rx_err);
61145 goto out;
61146 }
61147
61148 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61149 if (sarb->len < pdu_length) {
61150 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
61151 __func__, pdu_length, sarb->len, vcc);
61152 - atomic_inc(&vcc->stats->rx_err);
61153 + atomic_inc_unchecked(&vcc->stats->rx_err);
61154 goto out;
61155 }
61156
61157 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
61158 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
61159 __func__, vcc);
61160 - atomic_inc(&vcc->stats->rx_err);
61161 + atomic_inc_unchecked(&vcc->stats->rx_err);
61162 goto out;
61163 }
61164
61165 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61166 if (printk_ratelimit())
61167 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
61168 __func__, length);
61169 - atomic_inc(&vcc->stats->rx_drop);
61170 + atomic_inc_unchecked(&vcc->stats->rx_drop);
61171 goto out;
61172 }
61173
61174 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61175
61176 vcc->push(vcc, skb);
61177
61178 - atomic_inc(&vcc->stats->rx);
61179 + atomic_inc_unchecked(&vcc->stats->rx);
61180 out:
61181 skb_trim(sarb, 0);
61182 }
61183 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
61184 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
61185
61186 usbatm_pop(vcc, skb);
61187 - atomic_inc(&vcc->stats->tx);
61188 + atomic_inc_unchecked(&vcc->stats->tx);
61189
61190 skb = skb_dequeue(&instance->sndqueue);
61191 }
61192 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
61193 if (!left--)
61194 return sprintf(page,
61195 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
61196 - atomic_read(&atm_dev->stats.aal5.tx),
61197 - atomic_read(&atm_dev->stats.aal5.tx_err),
61198 - atomic_read(&atm_dev->stats.aal5.rx),
61199 - atomic_read(&atm_dev->stats.aal5.rx_err),
61200 - atomic_read(&atm_dev->stats.aal5.rx_drop));
61201 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
61202 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
61203 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
61204 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
61205 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
61206
61207 if (!left--) {
61208 if (instance->disconnected)
61209 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
61210 index 24e6205..fe5a5d4 100644
61211 --- a/drivers/usb/core/hcd.c
61212 +++ b/drivers/usb/core/hcd.c
61213 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
61214
61215 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
61216
61217 -struct usb_mon_operations *mon_ops;
61218 +const struct usb_mon_operations *mon_ops;
61219
61220 /*
61221 * The registration is unlocked.
61222 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
61223 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
61224 */
61225
61226 -int usb_mon_register (struct usb_mon_operations *ops)
61227 +int usb_mon_register (const struct usb_mon_operations *ops)
61228 {
61229
61230 if (mon_ops)
61231 diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
61232 index bcbe104..9cfd1c6 100644
61233 --- a/drivers/usb/core/hcd.h
61234 +++ b/drivers/usb/core/hcd.h
61235 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
61236 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
61237
61238 struct usb_mon_operations {
61239 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
61240 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
61241 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
61242 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
61243 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
61244 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
61245 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
61246 };
61247
61248 -extern struct usb_mon_operations *mon_ops;
61249 +extern const struct usb_mon_operations *mon_ops;
61250
61251 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
61252 {
61253 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
61254 (*mon_ops->urb_complete)(bus, urb, status);
61255 }
61256
61257 -int usb_mon_register(struct usb_mon_operations *ops);
61258 +int usb_mon_register(const struct usb_mon_operations *ops);
61259 void usb_mon_deregister(void);
61260
61261 #else
61262 diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
61263 index 62ff5e7..530b74e 100644
61264 --- a/drivers/usb/misc/appledisplay.c
61265 +++ b/drivers/usb/misc/appledisplay.c
61266 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
61267 return pdata->msgdata[1];
61268 }
61269
61270 -static struct backlight_ops appledisplay_bl_data = {
61271 +static const struct backlight_ops appledisplay_bl_data = {
61272 .get_brightness = appledisplay_bl_get_brightness,
61273 .update_status = appledisplay_bl_update_status,
61274 };
61275 diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
61276 index e0c2db3..bd8cb66 100644
61277 --- a/drivers/usb/mon/mon_main.c
61278 +++ b/drivers/usb/mon/mon_main.c
61279 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
61280 /*
61281 * Ops
61282 */
61283 -static struct usb_mon_operations mon_ops_0 = {
61284 +static const struct usb_mon_operations mon_ops_0 = {
61285 .urb_submit = mon_submit,
61286 .urb_submit_error = mon_submit_error,
61287 .urb_complete = mon_complete,
61288 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
61289 index d6bea3e..60b250e 100644
61290 --- a/drivers/usb/wusbcore/wa-hc.h
61291 +++ b/drivers/usb/wusbcore/wa-hc.h
61292 @@ -192,7 +192,7 @@ struct wahc {
61293 struct list_head xfer_delayed_list;
61294 spinlock_t xfer_list_lock;
61295 struct work_struct xfer_work;
61296 - atomic_t xfer_id_count;
61297 + atomic_unchecked_t xfer_id_count;
61298 };
61299
61300
61301 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
61302 INIT_LIST_HEAD(&wa->xfer_delayed_list);
61303 spin_lock_init(&wa->xfer_list_lock);
61304 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
61305 - atomic_set(&wa->xfer_id_count, 1);
61306 + atomic_set_unchecked(&wa->xfer_id_count, 1);
61307 }
61308
61309 /**
61310 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
61311 index 613a5fc..3174865 100644
61312 --- a/drivers/usb/wusbcore/wa-xfer.c
61313 +++ b/drivers/usb/wusbcore/wa-xfer.c
61314 @@ -293,7 +293,7 @@ out:
61315 */
61316 static void wa_xfer_id_init(struct wa_xfer *xfer)
61317 {
61318 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
61319 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
61320 }
61321
61322 /*
61323 diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
61324 index aa42fce..f8a828c 100644
61325 --- a/drivers/uwb/wlp/messages.c
61326 +++ b/drivers/uwb/wlp/messages.c
61327 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
61328 size_t len = skb->len;
61329 size_t used;
61330 ssize_t result;
61331 - struct wlp_nonce enonce, rnonce;
61332 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
61333 enum wlp_assc_error assc_err;
61334 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
61335 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
61336 diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
61337 index 0370399..6627c94 100644
61338 --- a/drivers/uwb/wlp/sysfs.c
61339 +++ b/drivers/uwb/wlp/sysfs.c
61340 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
61341 return ret;
61342 }
61343
61344 -static
61345 -struct sysfs_ops wss_sysfs_ops = {
61346 +static const struct sysfs_ops wss_sysfs_ops = {
61347 .show = wlp_wss_attr_show,
61348 .store = wlp_wss_attr_store,
61349 };
61350 diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
61351 index 8c5e432..5ee90ea 100644
61352 --- a/drivers/video/atmel_lcdfb.c
61353 +++ b/drivers/video/atmel_lcdfb.c
61354 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
61355 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
61356 }
61357
61358 -static struct backlight_ops atmel_lcdc_bl_ops = {
61359 +static const struct backlight_ops atmel_lcdc_bl_ops = {
61360 .update_status = atmel_bl_update_status,
61361 .get_brightness = atmel_bl_get_brightness,
61362 };
61363 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
61364 index e4e4d43..66bcbcc 100644
61365 --- a/drivers/video/aty/aty128fb.c
61366 +++ b/drivers/video/aty/aty128fb.c
61367 @@ -149,7 +149,7 @@ enum {
61368 };
61369
61370 /* Must match above enum */
61371 -static const char *r128_family[] __devinitdata = {
61372 +static const char *r128_family[] __devinitconst = {
61373 "AGP",
61374 "PCI",
61375 "PRO AGP",
61376 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
61377 return bd->props.brightness;
61378 }
61379
61380 -static struct backlight_ops aty128_bl_data = {
61381 +static const struct backlight_ops aty128_bl_data = {
61382 .get_brightness = aty128_bl_get_brightness,
61383 .update_status = aty128_bl_update_status,
61384 };
61385 diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
61386 index 913b4a4..9295a38 100644
61387 --- a/drivers/video/aty/atyfb_base.c
61388 +++ b/drivers/video/aty/atyfb_base.c
61389 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
61390 return bd->props.brightness;
61391 }
61392
61393 -static struct backlight_ops aty_bl_data = {
61394 +static const struct backlight_ops aty_bl_data = {
61395 .get_brightness = aty_bl_get_brightness,
61396 .update_status = aty_bl_update_status,
61397 };
61398 diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
61399 index 1a056ad..221bd6a 100644
61400 --- a/drivers/video/aty/radeon_backlight.c
61401 +++ b/drivers/video/aty/radeon_backlight.c
61402 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
61403 return bd->props.brightness;
61404 }
61405
61406 -static struct backlight_ops radeon_bl_data = {
61407 +static const struct backlight_ops radeon_bl_data = {
61408 .get_brightness = radeon_bl_get_brightness,
61409 .update_status = radeon_bl_update_status,
61410 };
61411 diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
61412 index ad05da5..3cb2cb9 100644
61413 --- a/drivers/video/backlight/adp5520_bl.c
61414 +++ b/drivers/video/backlight/adp5520_bl.c
61415 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
61416 return error ? data->current_brightness : reg_val;
61417 }
61418
61419 -static struct backlight_ops adp5520_bl_ops = {
61420 +static const struct backlight_ops adp5520_bl_ops = {
61421 .update_status = adp5520_bl_update_status,
61422 .get_brightness = adp5520_bl_get_brightness,
61423 };
61424 diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
61425 index 2c3bdfc..d769b0b 100644
61426 --- a/drivers/video/backlight/adx_bl.c
61427 +++ b/drivers/video/backlight/adx_bl.c
61428 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
61429 return 1;
61430 }
61431
61432 -static struct backlight_ops adx_backlight_ops = {
61433 +static const struct backlight_ops adx_backlight_ops = {
61434 .options = 0,
61435 .update_status = adx_backlight_update_status,
61436 .get_brightness = adx_backlight_get_brightness,
61437 diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
61438 index 505c082..6b6b3cc 100644
61439 --- a/drivers/video/backlight/atmel-pwm-bl.c
61440 +++ b/drivers/video/backlight/atmel-pwm-bl.c
61441 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
61442 return pwm_channel_enable(&pwmbl->pwmc);
61443 }
61444
61445 -static struct backlight_ops atmel_pwm_bl_ops = {
61446 +static const struct backlight_ops atmel_pwm_bl_ops = {
61447 .get_brightness = atmel_pwm_bl_get_intensity,
61448 .update_status = atmel_pwm_bl_set_intensity,
61449 };
61450 diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
61451 index 5e20e6e..89025e6 100644
61452 --- a/drivers/video/backlight/backlight.c
61453 +++ b/drivers/video/backlight/backlight.c
61454 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
61455 * ERR_PTR() or a pointer to the newly allocated device.
61456 */
61457 struct backlight_device *backlight_device_register(const char *name,
61458 - struct device *parent, void *devdata, struct backlight_ops *ops)
61459 + struct device *parent, void *devdata, const struct backlight_ops *ops)
61460 {
61461 struct backlight_device *new_bd;
61462 int rc;
61463 diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
61464 index 9677494..b4bcf80 100644
61465 --- a/drivers/video/backlight/corgi_lcd.c
61466 +++ b/drivers/video/backlight/corgi_lcd.c
61467 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
61468 }
61469 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
61470
61471 -static struct backlight_ops corgi_bl_ops = {
61472 +static const struct backlight_ops corgi_bl_ops = {
61473 .get_brightness = corgi_bl_get_intensity,
61474 .update_status = corgi_bl_update_status,
61475 };
61476 diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
61477 index b9fe62b..2914bf1 100644
61478 --- a/drivers/video/backlight/cr_bllcd.c
61479 +++ b/drivers/video/backlight/cr_bllcd.c
61480 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
61481 return intensity;
61482 }
61483
61484 -static struct backlight_ops cr_backlight_ops = {
61485 +static const struct backlight_ops cr_backlight_ops = {
61486 .get_brightness = cr_backlight_get_intensity,
61487 .update_status = cr_backlight_set_intensity,
61488 };
61489 diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
61490 index 701a108..feacfd5 100644
61491 --- a/drivers/video/backlight/da903x_bl.c
61492 +++ b/drivers/video/backlight/da903x_bl.c
61493 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
61494 return data->current_brightness;
61495 }
61496
61497 -static struct backlight_ops da903x_backlight_ops = {
61498 +static const struct backlight_ops da903x_backlight_ops = {
61499 .update_status = da903x_backlight_update_status,
61500 .get_brightness = da903x_backlight_get_brightness,
61501 };
61502 diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
61503 index 6d27f62..e6d348e 100644
61504 --- a/drivers/video/backlight/generic_bl.c
61505 +++ b/drivers/video/backlight/generic_bl.c
61506 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
61507 }
61508 EXPORT_SYMBOL(corgibl_limit_intensity);
61509
61510 -static struct backlight_ops genericbl_ops = {
61511 +static const struct backlight_ops genericbl_ops = {
61512 .options = BL_CORE_SUSPENDRESUME,
61513 .get_brightness = genericbl_get_intensity,
61514 .update_status = genericbl_send_intensity,
61515 diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
61516 index 7fb4eef..f7cc528 100644
61517 --- a/drivers/video/backlight/hp680_bl.c
61518 +++ b/drivers/video/backlight/hp680_bl.c
61519 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
61520 return current_intensity;
61521 }
61522
61523 -static struct backlight_ops hp680bl_ops = {
61524 +static const struct backlight_ops hp680bl_ops = {
61525 .get_brightness = hp680bl_get_intensity,
61526 .update_status = hp680bl_set_intensity,
61527 };
61528 diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
61529 index 7aed256..db9071f 100644
61530 --- a/drivers/video/backlight/jornada720_bl.c
61531 +++ b/drivers/video/backlight/jornada720_bl.c
61532 @@ -93,7 +93,7 @@ out:
61533 return ret;
61534 }
61535
61536 -static struct backlight_ops jornada_bl_ops = {
61537 +static const struct backlight_ops jornada_bl_ops = {
61538 .get_brightness = jornada_bl_get_brightness,
61539 .update_status = jornada_bl_update_status,
61540 .options = BL_CORE_SUSPENDRESUME,
61541 diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
61542 index a38fda1..939e7b8 100644
61543 --- a/drivers/video/backlight/kb3886_bl.c
61544 +++ b/drivers/video/backlight/kb3886_bl.c
61545 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
61546 return kb3886bl_intensity;
61547 }
61548
61549 -static struct backlight_ops kb3886bl_ops = {
61550 +static const struct backlight_ops kb3886bl_ops = {
61551 .get_brightness = kb3886bl_get_intensity,
61552 .update_status = kb3886bl_send_intensity,
61553 };
61554 diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
61555 index 6b488b8..00a9591 100644
61556 --- a/drivers/video/backlight/locomolcd.c
61557 +++ b/drivers/video/backlight/locomolcd.c
61558 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
61559 return current_intensity;
61560 }
61561
61562 -static struct backlight_ops locomobl_data = {
61563 +static const struct backlight_ops locomobl_data = {
61564 .get_brightness = locomolcd_get_intensity,
61565 .update_status = locomolcd_set_intensity,
61566 };
61567 diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
61568 index 99bdfa8..3dac448 100644
61569 --- a/drivers/video/backlight/mbp_nvidia_bl.c
61570 +++ b/drivers/video/backlight/mbp_nvidia_bl.c
61571 @@ -33,7 +33,7 @@ struct dmi_match_data {
61572 unsigned long iostart;
61573 unsigned long iolen;
61574 /* Backlight operations structure. */
61575 - struct backlight_ops backlight_ops;
61576 + const struct backlight_ops backlight_ops;
61577 };
61578
61579 /* Module parameters. */
61580 diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
61581 index cbad67e..3cf900e 100644
61582 --- a/drivers/video/backlight/omap1_bl.c
61583 +++ b/drivers/video/backlight/omap1_bl.c
61584 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
61585 return bl->current_intensity;
61586 }
61587
61588 -static struct backlight_ops omapbl_ops = {
61589 +static const struct backlight_ops omapbl_ops = {
61590 .get_brightness = omapbl_get_intensity,
61591 .update_status = omapbl_update_status,
61592 };
61593 diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
61594 index 9edaf24..075786e 100644
61595 --- a/drivers/video/backlight/progear_bl.c
61596 +++ b/drivers/video/backlight/progear_bl.c
61597 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
61598 return intensity - HW_LEVEL_MIN;
61599 }
61600
61601 -static struct backlight_ops progearbl_ops = {
61602 +static const struct backlight_ops progearbl_ops = {
61603 .get_brightness = progearbl_get_intensity,
61604 .update_status = progearbl_set_intensity,
61605 };
61606 diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
61607 index 8871662..df9e0b3 100644
61608 --- a/drivers/video/backlight/pwm_bl.c
61609 +++ b/drivers/video/backlight/pwm_bl.c
61610 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
61611 return bl->props.brightness;
61612 }
61613
61614 -static struct backlight_ops pwm_backlight_ops = {
61615 +static const struct backlight_ops pwm_backlight_ops = {
61616 .update_status = pwm_backlight_update_status,
61617 .get_brightness = pwm_backlight_get_brightness,
61618 };
61619 diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
61620 index 43edbad..e14ce4d 100644
61621 --- a/drivers/video/backlight/tosa_bl.c
61622 +++ b/drivers/video/backlight/tosa_bl.c
61623 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
61624 return props->brightness;
61625 }
61626
61627 -static struct backlight_ops bl_ops = {
61628 +static const struct backlight_ops bl_ops = {
61629 .get_brightness = tosa_bl_get_brightness,
61630 .update_status = tosa_bl_update_status,
61631 };
61632 diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
61633 index 467bdb7..e32add3 100644
61634 --- a/drivers/video/backlight/wm831x_bl.c
61635 +++ b/drivers/video/backlight/wm831x_bl.c
61636 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
61637 return data->current_brightness;
61638 }
61639
61640 -static struct backlight_ops wm831x_backlight_ops = {
61641 +static const struct backlight_ops wm831x_backlight_ops = {
61642 .options = BL_CORE_SUSPENDRESUME,
61643 .update_status = wm831x_backlight_update_status,
61644 .get_brightness = wm831x_backlight_get_brightness,
61645 diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
61646 index e49ae5e..db4e6f7 100644
61647 --- a/drivers/video/bf54x-lq043fb.c
61648 +++ b/drivers/video/bf54x-lq043fb.c
61649 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
61650 return 0;
61651 }
61652
61653 -static struct backlight_ops bfin_lq043fb_bl_ops = {
61654 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
61655 .get_brightness = bl_get_brightness,
61656 };
61657
61658 diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
61659 index 2c72a7c..d523e52 100644
61660 --- a/drivers/video/bfin-t350mcqb-fb.c
61661 +++ b/drivers/video/bfin-t350mcqb-fb.c
61662 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
61663 return 0;
61664 }
61665
61666 -static struct backlight_ops bfin_lq043fb_bl_ops = {
61667 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
61668 .get_brightness = bl_get_brightness,
61669 };
61670
61671 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
61672 index f53b9f1..958bf4e 100644
61673 --- a/drivers/video/fbcmap.c
61674 +++ b/drivers/video/fbcmap.c
61675 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
61676 rc = -ENODEV;
61677 goto out;
61678 }
61679 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
61680 - !info->fbops->fb_setcmap)) {
61681 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
61682 rc = -EINVAL;
61683 goto out1;
61684 }
61685 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
61686 index 99bbd28..ad3829e 100644
61687 --- a/drivers/video/fbmem.c
61688 +++ b/drivers/video/fbmem.c
61689 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
61690 image->dx += image->width + 8;
61691 }
61692 } else if (rotate == FB_ROTATE_UD) {
61693 - for (x = 0; x < num && image->dx >= 0; x++) {
61694 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
61695 info->fbops->fb_imageblit(info, image);
61696 image->dx -= image->width + 8;
61697 }
61698 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
61699 image->dy += image->height + 8;
61700 }
61701 } else if (rotate == FB_ROTATE_CCW) {
61702 - for (x = 0; x < num && image->dy >= 0; x++) {
61703 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
61704 info->fbops->fb_imageblit(info, image);
61705 image->dy -= image->height + 8;
61706 }
61707 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
61708 int flags = info->flags;
61709 int ret = 0;
61710
61711 + pax_track_stack();
61712 +
61713 if (var->activate & FB_ACTIVATE_INV_MODE) {
61714 struct fb_videomode mode1, mode2;
61715
61716 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
61717 void __user *argp = (void __user *)arg;
61718 long ret = 0;
61719
61720 + pax_track_stack();
61721 +
61722 switch (cmd) {
61723 case FBIOGET_VSCREENINFO:
61724 if (!lock_fb_info(info))
61725 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
61726 return -EFAULT;
61727 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
61728 return -EINVAL;
61729 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
61730 + if (con2fb.framebuffer >= FB_MAX)
61731 return -EINVAL;
61732 if (!registered_fb[con2fb.framebuffer])
61733 request_module("fb%d", con2fb.framebuffer);
61734 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
61735 index f20eff8..3e4f622 100644
61736 --- a/drivers/video/geode/gx1fb_core.c
61737 +++ b/drivers/video/geode/gx1fb_core.c
61738 @@ -30,7 +30,7 @@ static int crt_option = 1;
61739 static char panel_option[32] = "";
61740
61741 /* Modes relevant to the GX1 (taken from modedb.c) */
61742 -static const struct fb_videomode __initdata gx1_modedb[] = {
61743 +static const struct fb_videomode __initconst gx1_modedb[] = {
61744 /* 640x480-60 VESA */
61745 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
61746 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
61747 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
61748 index 896e53d..4d87d0b 100644
61749 --- a/drivers/video/gxt4500.c
61750 +++ b/drivers/video/gxt4500.c
61751 @@ -156,7 +156,7 @@ struct gxt4500_par {
61752 static char *mode_option;
61753
61754 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
61755 -static const struct fb_videomode defaultmode __devinitdata = {
61756 +static const struct fb_videomode defaultmode __devinitconst = {
61757 .refresh = 60,
61758 .xres = 1280,
61759 .yres = 1024,
61760 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
61761 return 0;
61762 }
61763
61764 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
61765 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
61766 .id = "IBM GXT4500P",
61767 .type = FB_TYPE_PACKED_PIXELS,
61768 .visual = FB_VISUAL_PSEUDOCOLOR,
61769 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
61770 index f5bedee..28c6028 100644
61771 --- a/drivers/video/i810/i810_accel.c
61772 +++ b/drivers/video/i810/i810_accel.c
61773 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
61774 }
61775 }
61776 printk("ringbuffer lockup!!!\n");
61777 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
61778 i810_report_error(mmio);
61779 par->dev_flags |= LOCKUP;
61780 info->pixmap.scan_align = 1;
61781 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
61782 index 5743ea2..457f82c 100644
61783 --- a/drivers/video/i810/i810_main.c
61784 +++ b/drivers/video/i810/i810_main.c
61785 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
61786 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
61787
61788 /* PCI */
61789 -static const char *i810_pci_list[] __devinitdata = {
61790 +static const char *i810_pci_list[] __devinitconst = {
61791 "Intel(R) 810 Framebuffer Device" ,
61792 "Intel(R) 810-DC100 Framebuffer Device" ,
61793 "Intel(R) 810E Framebuffer Device" ,
61794 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
61795 index 3c14e43..eafa544 100644
61796 --- a/drivers/video/logo/logo_linux_clut224.ppm
61797 +++ b/drivers/video/logo/logo_linux_clut224.ppm
61798 @@ -1,1604 +1,1123 @@
61799 P3
61800 -# Standard 224-color Linux logo
61801 80 80
61802 255
61803 - 0 0 0 0 0 0 0 0 0 0 0 0
61804 - 0 0 0 0 0 0 0 0 0 0 0 0
61805 - 0 0 0 0 0 0 0 0 0 0 0 0
61806 - 0 0 0 0 0 0 0 0 0 0 0 0
61807 - 0 0 0 0 0 0 0 0 0 0 0 0
61808 - 0 0 0 0 0 0 0 0 0 0 0 0
61809 - 0 0 0 0 0 0 0 0 0 0 0 0
61810 - 0 0 0 0 0 0 0 0 0 0 0 0
61811 - 0 0 0 0 0 0 0 0 0 0 0 0
61812 - 6 6 6 6 6 6 10 10 10 10 10 10
61813 - 10 10 10 6 6 6 6 6 6 6 6 6
61814 - 0 0 0 0 0 0 0 0 0 0 0 0
61815 - 0 0 0 0 0 0 0 0 0 0 0 0
61816 - 0 0 0 0 0 0 0 0 0 0 0 0
61817 - 0 0 0 0 0 0 0 0 0 0 0 0
61818 - 0 0 0 0 0 0 0 0 0 0 0 0
61819 - 0 0 0 0 0 0 0 0 0 0 0 0
61820 - 0 0 0 0 0 0 0 0 0 0 0 0
61821 - 0 0 0 0 0 0 0 0 0 0 0 0
61822 - 0 0 0 0 0 0 0 0 0 0 0 0
61823 - 0 0 0 0 0 0 0 0 0 0 0 0
61824 - 0 0 0 0 0 0 0 0 0 0 0 0
61825 - 0 0 0 0 0 0 0 0 0 0 0 0
61826 - 0 0 0 0 0 0 0 0 0 0 0 0
61827 - 0 0 0 0 0 0 0 0 0 0 0 0
61828 - 0 0 0 0 0 0 0 0 0 0 0 0
61829 - 0 0 0 0 0 0 0 0 0 0 0 0
61830 - 0 0 0 0 0 0 0 0 0 0 0 0
61831 - 0 0 0 6 6 6 10 10 10 14 14 14
61832 - 22 22 22 26 26 26 30 30 30 34 34 34
61833 - 30 30 30 30 30 30 26 26 26 18 18 18
61834 - 14 14 14 10 10 10 6 6 6 0 0 0
61835 - 0 0 0 0 0 0 0 0 0 0 0 0
61836 - 0 0 0 0 0 0 0 0 0 0 0 0
61837 - 0 0 0 0 0 0 0 0 0 0 0 0
61838 - 0 0 0 0 0 0 0 0 0 0 0 0
61839 - 0 0 0 0 0 0 0 0 0 0 0 0
61840 - 0 0 0 0 0 0 0 0 0 0 0 0
61841 - 0 0 0 0 0 0 0 0 0 0 0 0
61842 - 0 0 0 0 0 0 0 0 0 0 0 0
61843 - 0 0 0 0 0 0 0 0 0 0 0 0
61844 - 0 0 0 0 0 1 0 0 1 0 0 0
61845 - 0 0 0 0 0 0 0 0 0 0 0 0
61846 - 0 0 0 0 0 0 0 0 0 0 0 0
61847 - 0 0 0 0 0 0 0 0 0 0 0 0
61848 - 0 0 0 0 0 0 0 0 0 0 0 0
61849 - 0 0 0 0 0 0 0 0 0 0 0 0
61850 - 0 0 0 0 0 0 0 0 0 0 0 0
61851 - 6 6 6 14 14 14 26 26 26 42 42 42
61852 - 54 54 54 66 66 66 78 78 78 78 78 78
61853 - 78 78 78 74 74 74 66 66 66 54 54 54
61854 - 42 42 42 26 26 26 18 18 18 10 10 10
61855 - 6 6 6 0 0 0 0 0 0 0 0 0
61856 - 0 0 0 0 0 0 0 0 0 0 0 0
61857 - 0 0 0 0 0 0 0 0 0 0 0 0
61858 - 0 0 0 0 0 0 0 0 0 0 0 0
61859 - 0 0 0 0 0 0 0 0 0 0 0 0
61860 - 0 0 0 0 0 0 0 0 0 0 0 0
61861 - 0 0 0 0 0 0 0 0 0 0 0 0
61862 - 0 0 0 0 0 0 0 0 0 0 0 0
61863 - 0 0 0 0 0 0 0 0 0 0 0 0
61864 - 0 0 1 0 0 0 0 0 0 0 0 0
61865 - 0 0 0 0 0 0 0 0 0 0 0 0
61866 - 0 0 0 0 0 0 0 0 0 0 0 0
61867 - 0 0 0 0 0 0 0 0 0 0 0 0
61868 - 0 0 0 0 0 0 0 0 0 0 0 0
61869 - 0 0 0 0 0 0 0 0 0 0 0 0
61870 - 0 0 0 0 0 0 0 0 0 10 10 10
61871 - 22 22 22 42 42 42 66 66 66 86 86 86
61872 - 66 66 66 38 38 38 38 38 38 22 22 22
61873 - 26 26 26 34 34 34 54 54 54 66 66 66
61874 - 86 86 86 70 70 70 46 46 46 26 26 26
61875 - 14 14 14 6 6 6 0 0 0 0 0 0
61876 - 0 0 0 0 0 0 0 0 0 0 0 0
61877 - 0 0 0 0 0 0 0 0 0 0 0 0
61878 - 0 0 0 0 0 0 0 0 0 0 0 0
61879 - 0 0 0 0 0 0 0 0 0 0 0 0
61880 - 0 0 0 0 0 0 0 0 0 0 0 0
61881 - 0 0 0 0 0 0 0 0 0 0 0 0
61882 - 0 0 0 0 0 0 0 0 0 0 0 0
61883 - 0 0 0 0 0 0 0 0 0 0 0 0
61884 - 0 0 1 0 0 1 0 0 1 0 0 0
61885 - 0 0 0 0 0 0 0 0 0 0 0 0
61886 - 0 0 0 0 0 0 0 0 0 0 0 0
61887 - 0 0 0 0 0 0 0 0 0 0 0 0
61888 - 0 0 0 0 0 0 0 0 0 0 0 0
61889 - 0 0 0 0 0 0 0 0 0 0 0 0
61890 - 0 0 0 0 0 0 10 10 10 26 26 26
61891 - 50 50 50 82 82 82 58 58 58 6 6 6
61892 - 2 2 6 2 2 6 2 2 6 2 2 6
61893 - 2 2 6 2 2 6 2 2 6 2 2 6
61894 - 6 6 6 54 54 54 86 86 86 66 66 66
61895 - 38 38 38 18 18 18 6 6 6 0 0 0
61896 - 0 0 0 0 0 0 0 0 0 0 0 0
61897 - 0 0 0 0 0 0 0 0 0 0 0 0
61898 - 0 0 0 0 0 0 0 0 0 0 0 0
61899 - 0 0 0 0 0 0 0 0 0 0 0 0
61900 - 0 0 0 0 0 0 0 0 0 0 0 0
61901 - 0 0 0 0 0 0 0 0 0 0 0 0
61902 - 0 0 0 0 0 0 0 0 0 0 0 0
61903 - 0 0 0 0 0 0 0 0 0 0 0 0
61904 - 0 0 0 0 0 0 0 0 0 0 0 0
61905 - 0 0 0 0 0 0 0 0 0 0 0 0
61906 - 0 0 0 0 0 0 0 0 0 0 0 0
61907 - 0 0 0 0 0 0 0 0 0 0 0 0
61908 - 0 0 0 0 0 0 0 0 0 0 0 0
61909 - 0 0 0 0 0 0 0 0 0 0 0 0
61910 - 0 0 0 6 6 6 22 22 22 50 50 50
61911 - 78 78 78 34 34 34 2 2 6 2 2 6
61912 - 2 2 6 2 2 6 2 2 6 2 2 6
61913 - 2 2 6 2 2 6 2 2 6 2 2 6
61914 - 2 2 6 2 2 6 6 6 6 70 70 70
61915 - 78 78 78 46 46 46 22 22 22 6 6 6
61916 - 0 0 0 0 0 0 0 0 0 0 0 0
61917 - 0 0 0 0 0 0 0 0 0 0 0 0
61918 - 0 0 0 0 0 0 0 0 0 0 0 0
61919 - 0 0 0 0 0 0 0 0 0 0 0 0
61920 - 0 0 0 0 0 0 0 0 0 0 0 0
61921 - 0 0 0 0 0 0 0 0 0 0 0 0
61922 - 0 0 0 0 0 0 0 0 0 0 0 0
61923 - 0 0 0 0 0 0 0 0 0 0 0 0
61924 - 0 0 1 0 0 1 0 0 1 0 0 0
61925 - 0 0 0 0 0 0 0 0 0 0 0 0
61926 - 0 0 0 0 0 0 0 0 0 0 0 0
61927 - 0 0 0 0 0 0 0 0 0 0 0 0
61928 - 0 0 0 0 0 0 0 0 0 0 0 0
61929 - 0 0 0 0 0 0 0 0 0 0 0 0
61930 - 6 6 6 18 18 18 42 42 42 82 82 82
61931 - 26 26 26 2 2 6 2 2 6 2 2 6
61932 - 2 2 6 2 2 6 2 2 6 2 2 6
61933 - 2 2 6 2 2 6 2 2 6 14 14 14
61934 - 46 46 46 34 34 34 6 6 6 2 2 6
61935 - 42 42 42 78 78 78 42 42 42 18 18 18
61936 - 6 6 6 0 0 0 0 0 0 0 0 0
61937 - 0 0 0 0 0 0 0 0 0 0 0 0
61938 - 0 0 0 0 0 0 0 0 0 0 0 0
61939 - 0 0 0 0 0 0 0 0 0 0 0 0
61940 - 0 0 0 0 0 0 0 0 0 0 0 0
61941 - 0 0 0 0 0 0 0 0 0 0 0 0
61942 - 0 0 0 0 0 0 0 0 0 0 0 0
61943 - 0 0 0 0 0 0 0 0 0 0 0 0
61944 - 0 0 1 0 0 0 0 0 1 0 0 0
61945 - 0 0 0 0 0 0 0 0 0 0 0 0
61946 - 0 0 0 0 0 0 0 0 0 0 0 0
61947 - 0 0 0 0 0 0 0 0 0 0 0 0
61948 - 0 0 0 0 0 0 0 0 0 0 0 0
61949 - 0 0 0 0 0 0 0 0 0 0 0 0
61950 - 10 10 10 30 30 30 66 66 66 58 58 58
61951 - 2 2 6 2 2 6 2 2 6 2 2 6
61952 - 2 2 6 2 2 6 2 2 6 2 2 6
61953 - 2 2 6 2 2 6 2 2 6 26 26 26
61954 - 86 86 86 101 101 101 46 46 46 10 10 10
61955 - 2 2 6 58 58 58 70 70 70 34 34 34
61956 - 10 10 10 0 0 0 0 0 0 0 0 0
61957 - 0 0 0 0 0 0 0 0 0 0 0 0
61958 - 0 0 0 0 0 0 0 0 0 0 0 0
61959 - 0 0 0 0 0 0 0 0 0 0 0 0
61960 - 0 0 0 0 0 0 0 0 0 0 0 0
61961 - 0 0 0 0 0 0 0 0 0 0 0 0
61962 - 0 0 0 0 0 0 0 0 0 0 0 0
61963 - 0 0 0 0 0 0 0 0 0 0 0 0
61964 - 0 0 1 0 0 1 0 0 1 0 0 0
61965 - 0 0 0 0 0 0 0 0 0 0 0 0
61966 - 0 0 0 0 0 0 0 0 0 0 0 0
61967 - 0 0 0 0 0 0 0 0 0 0 0 0
61968 - 0 0 0 0 0 0 0 0 0 0 0 0
61969 - 0 0 0 0 0 0 0 0 0 0 0 0
61970 - 14 14 14 42 42 42 86 86 86 10 10 10
61971 - 2 2 6 2 2 6 2 2 6 2 2 6
61972 - 2 2 6 2 2 6 2 2 6 2 2 6
61973 - 2 2 6 2 2 6 2 2 6 30 30 30
61974 - 94 94 94 94 94 94 58 58 58 26 26 26
61975 - 2 2 6 6 6 6 78 78 78 54 54 54
61976 - 22 22 22 6 6 6 0 0 0 0 0 0
61977 - 0 0 0 0 0 0 0 0 0 0 0 0
61978 - 0 0 0 0 0 0 0 0 0 0 0 0
61979 - 0 0 0 0 0 0 0 0 0 0 0 0
61980 - 0 0 0 0 0 0 0 0 0 0 0 0
61981 - 0 0 0 0 0 0 0 0 0 0 0 0
61982 - 0 0 0 0 0 0 0 0 0 0 0 0
61983 - 0 0 0 0 0 0 0 0 0 0 0 0
61984 - 0 0 0 0 0 0 0 0 0 0 0 0
61985 - 0 0 0 0 0 0 0 0 0 0 0 0
61986 - 0 0 0 0 0 0 0 0 0 0 0 0
61987 - 0 0 0 0 0 0 0 0 0 0 0 0
61988 - 0 0 0 0 0 0 0 0 0 0 0 0
61989 - 0 0 0 0 0 0 0 0 0 6 6 6
61990 - 22 22 22 62 62 62 62 62 62 2 2 6
61991 - 2 2 6 2 2 6 2 2 6 2 2 6
61992 - 2 2 6 2 2 6 2 2 6 2 2 6
61993 - 2 2 6 2 2 6 2 2 6 26 26 26
61994 - 54 54 54 38 38 38 18 18 18 10 10 10
61995 - 2 2 6 2 2 6 34 34 34 82 82 82
61996 - 38 38 38 14 14 14 0 0 0 0 0 0
61997 - 0 0 0 0 0 0 0 0 0 0 0 0
61998 - 0 0 0 0 0 0 0 0 0 0 0 0
61999 - 0 0 0 0 0 0 0 0 0 0 0 0
62000 - 0 0 0 0 0 0 0 0 0 0 0 0
62001 - 0 0 0 0 0 0 0 0 0 0 0 0
62002 - 0 0 0 0 0 0 0 0 0 0 0 0
62003 - 0 0 0 0 0 0 0 0 0 0 0 0
62004 - 0 0 0 0 0 1 0 0 1 0 0 0
62005 - 0 0 0 0 0 0 0 0 0 0 0 0
62006 - 0 0 0 0 0 0 0 0 0 0 0 0
62007 - 0 0 0 0 0 0 0 0 0 0 0 0
62008 - 0 0 0 0 0 0 0 0 0 0 0 0
62009 - 0 0 0 0 0 0 0 0 0 6 6 6
62010 - 30 30 30 78 78 78 30 30 30 2 2 6
62011 - 2 2 6 2 2 6 2 2 6 2 2 6
62012 - 2 2 6 2 2 6 2 2 6 2 2 6
62013 - 2 2 6 2 2 6 2 2 6 10 10 10
62014 - 10 10 10 2 2 6 2 2 6 2 2 6
62015 - 2 2 6 2 2 6 2 2 6 78 78 78
62016 - 50 50 50 18 18 18 6 6 6 0 0 0
62017 - 0 0 0 0 0 0 0 0 0 0 0 0
62018 - 0 0 0 0 0 0 0 0 0 0 0 0
62019 - 0 0 0 0 0 0 0 0 0 0 0 0
62020 - 0 0 0 0 0 0 0 0 0 0 0 0
62021 - 0 0 0 0 0 0 0 0 0 0 0 0
62022 - 0 0 0 0 0 0 0 0 0 0 0 0
62023 - 0 0 0 0 0 0 0 0 0 0 0 0
62024 - 0 0 1 0 0 0 0 0 0 0 0 0
62025 - 0 0 0 0 0 0 0 0 0 0 0 0
62026 - 0 0 0 0 0 0 0 0 0 0 0 0
62027 - 0 0 0 0 0 0 0 0 0 0 0 0
62028 - 0 0 0 0 0 0 0 0 0 0 0 0
62029 - 0 0 0 0 0 0 0 0 0 10 10 10
62030 - 38 38 38 86 86 86 14 14 14 2 2 6
62031 - 2 2 6 2 2 6 2 2 6 2 2 6
62032 - 2 2 6 2 2 6 2 2 6 2 2 6
62033 - 2 2 6 2 2 6 2 2 6 2 2 6
62034 - 2 2 6 2 2 6 2 2 6 2 2 6
62035 - 2 2 6 2 2 6 2 2 6 54 54 54
62036 - 66 66 66 26 26 26 6 6 6 0 0 0
62037 - 0 0 0 0 0 0 0 0 0 0 0 0
62038 - 0 0 0 0 0 0 0 0 0 0 0 0
62039 - 0 0 0 0 0 0 0 0 0 0 0 0
62040 - 0 0 0 0 0 0 0 0 0 0 0 0
62041 - 0 0 0 0 0 0 0 0 0 0 0 0
62042 - 0 0 0 0 0 0 0 0 0 0 0 0
62043 - 0 0 0 0 0 0 0 0 0 0 0 0
62044 - 0 0 0 0 0 1 0 0 1 0 0 0
62045 - 0 0 0 0 0 0 0 0 0 0 0 0
62046 - 0 0 0 0 0 0 0 0 0 0 0 0
62047 - 0 0 0 0 0 0 0 0 0 0 0 0
62048 - 0 0 0 0 0 0 0 0 0 0 0 0
62049 - 0 0 0 0 0 0 0 0 0 14 14 14
62050 - 42 42 42 82 82 82 2 2 6 2 2 6
62051 - 2 2 6 6 6 6 10 10 10 2 2 6
62052 - 2 2 6 2 2 6 2 2 6 2 2 6
62053 - 2 2 6 2 2 6 2 2 6 6 6 6
62054 - 14 14 14 10 10 10 2 2 6 2 2 6
62055 - 2 2 6 2 2 6 2 2 6 18 18 18
62056 - 82 82 82 34 34 34 10 10 10 0 0 0
62057 - 0 0 0 0 0 0 0 0 0 0 0 0
62058 - 0 0 0 0 0 0 0 0 0 0 0 0
62059 - 0 0 0 0 0 0 0 0 0 0 0 0
62060 - 0 0 0 0 0 0 0 0 0 0 0 0
62061 - 0 0 0 0 0 0 0 0 0 0 0 0
62062 - 0 0 0 0 0 0 0 0 0 0 0 0
62063 - 0 0 0 0 0 0 0 0 0 0 0 0
62064 - 0 0 1 0 0 0 0 0 0 0 0 0
62065 - 0 0 0 0 0 0 0 0 0 0 0 0
62066 - 0 0 0 0 0 0 0 0 0 0 0 0
62067 - 0 0 0 0 0 0 0 0 0 0 0 0
62068 - 0 0 0 0 0 0 0 0 0 0 0 0
62069 - 0 0 0 0 0 0 0 0 0 14 14 14
62070 - 46 46 46 86 86 86 2 2 6 2 2 6
62071 - 6 6 6 6 6 6 22 22 22 34 34 34
62072 - 6 6 6 2 2 6 2 2 6 2 2 6
62073 - 2 2 6 2 2 6 18 18 18 34 34 34
62074 - 10 10 10 50 50 50 22 22 22 2 2 6
62075 - 2 2 6 2 2 6 2 2 6 10 10 10
62076 - 86 86 86 42 42 42 14 14 14 0 0 0
62077 - 0 0 0 0 0 0 0 0 0 0 0 0
62078 - 0 0 0 0 0 0 0 0 0 0 0 0
62079 - 0 0 0 0 0 0 0 0 0 0 0 0
62080 - 0 0 0 0 0 0 0 0 0 0 0 0
62081 - 0 0 0 0 0 0 0 0 0 0 0 0
62082 - 0 0 0 0 0 0 0 0 0 0 0 0
62083 - 0 0 0 0 0 0 0 0 0 0 0 0
62084 - 0 0 1 0 0 1 0 0 1 0 0 0
62085 - 0 0 0 0 0 0 0 0 0 0 0 0
62086 - 0 0 0 0 0 0 0 0 0 0 0 0
62087 - 0 0 0 0 0 0 0 0 0 0 0 0
62088 - 0 0 0 0 0 0 0 0 0 0 0 0
62089 - 0 0 0 0 0 0 0 0 0 14 14 14
62090 - 46 46 46 86 86 86 2 2 6 2 2 6
62091 - 38 38 38 116 116 116 94 94 94 22 22 22
62092 - 22 22 22 2 2 6 2 2 6 2 2 6
62093 - 14 14 14 86 86 86 138 138 138 162 162 162
62094 -154 154 154 38 38 38 26 26 26 6 6 6
62095 - 2 2 6 2 2 6 2 2 6 2 2 6
62096 - 86 86 86 46 46 46 14 14 14 0 0 0
62097 - 0 0 0 0 0 0 0 0 0 0 0 0
62098 - 0 0 0 0 0 0 0 0 0 0 0 0
62099 - 0 0 0 0 0 0 0 0 0 0 0 0
62100 - 0 0 0 0 0 0 0 0 0 0 0 0
62101 - 0 0 0 0 0 0 0 0 0 0 0 0
62102 - 0 0 0 0 0 0 0 0 0 0 0 0
62103 - 0 0 0 0 0 0 0 0 0 0 0 0
62104 - 0 0 0 0 0 0 0 0 0 0 0 0
62105 - 0 0 0 0 0 0 0 0 0 0 0 0
62106 - 0 0 0 0 0 0 0 0 0 0 0 0
62107 - 0 0 0 0 0 0 0 0 0 0 0 0
62108 - 0 0 0 0 0 0 0 0 0 0 0 0
62109 - 0 0 0 0 0 0 0 0 0 14 14 14
62110 - 46 46 46 86 86 86 2 2 6 14 14 14
62111 -134 134 134 198 198 198 195 195 195 116 116 116
62112 - 10 10 10 2 2 6 2 2 6 6 6 6
62113 -101 98 89 187 187 187 210 210 210 218 218 218
62114 -214 214 214 134 134 134 14 14 14 6 6 6
62115 - 2 2 6 2 2 6 2 2 6 2 2 6
62116 - 86 86 86 50 50 50 18 18 18 6 6 6
62117 - 0 0 0 0 0 0 0 0 0 0 0 0
62118 - 0 0 0 0 0 0 0 0 0 0 0 0
62119 - 0 0 0 0 0 0 0 0 0 0 0 0
62120 - 0 0 0 0 0 0 0 0 0 0 0 0
62121 - 0 0 0 0 0 0 0 0 0 0 0 0
62122 - 0 0 0 0 0 0 0 0 0 0 0 0
62123 - 0 0 0 0 0 0 0 0 1 0 0 0
62124 - 0 0 1 0 0 1 0 0 1 0 0 0
62125 - 0 0 0 0 0 0 0 0 0 0 0 0
62126 - 0 0 0 0 0 0 0 0 0 0 0 0
62127 - 0 0 0 0 0 0 0 0 0 0 0 0
62128 - 0 0 0 0 0 0 0 0 0 0 0 0
62129 - 0 0 0 0 0 0 0 0 0 14 14 14
62130 - 46 46 46 86 86 86 2 2 6 54 54 54
62131 -218 218 218 195 195 195 226 226 226 246 246 246
62132 - 58 58 58 2 2 6 2 2 6 30 30 30
62133 -210 210 210 253 253 253 174 174 174 123 123 123
62134 -221 221 221 234 234 234 74 74 74 2 2 6
62135 - 2 2 6 2 2 6 2 2 6 2 2 6
62136 - 70 70 70 58 58 58 22 22 22 6 6 6
62137 - 0 0 0 0 0 0 0 0 0 0 0 0
62138 - 0 0 0 0 0 0 0 0 0 0 0 0
62139 - 0 0 0 0 0 0 0 0 0 0 0 0
62140 - 0 0 0 0 0 0 0 0 0 0 0 0
62141 - 0 0 0 0 0 0 0 0 0 0 0 0
62142 - 0 0 0 0 0 0 0 0 0 0 0 0
62143 - 0 0 0 0 0 0 0 0 0 0 0 0
62144 - 0 0 0 0 0 0 0 0 0 0 0 0
62145 - 0 0 0 0 0 0 0 0 0 0 0 0
62146 - 0 0 0 0 0 0 0 0 0 0 0 0
62147 - 0 0 0 0 0 0 0 0 0 0 0 0
62148 - 0 0 0 0 0 0 0 0 0 0 0 0
62149 - 0 0 0 0 0 0 0 0 0 14 14 14
62150 - 46 46 46 82 82 82 2 2 6 106 106 106
62151 -170 170 170 26 26 26 86 86 86 226 226 226
62152 -123 123 123 10 10 10 14 14 14 46 46 46
62153 -231 231 231 190 190 190 6 6 6 70 70 70
62154 - 90 90 90 238 238 238 158 158 158 2 2 6
62155 - 2 2 6 2 2 6 2 2 6 2 2 6
62156 - 70 70 70 58 58 58 22 22 22 6 6 6
62157 - 0 0 0 0 0 0 0 0 0 0 0 0
62158 - 0 0 0 0 0 0 0 0 0 0 0 0
62159 - 0 0 0 0 0 0 0 0 0 0 0 0
62160 - 0 0 0 0 0 0 0 0 0 0 0 0
62161 - 0 0 0 0 0 0 0 0 0 0 0 0
62162 - 0 0 0 0 0 0 0 0 0 0 0 0
62163 - 0 0 0 0 0 0 0 0 1 0 0 0
62164 - 0 0 1 0 0 1 0 0 1 0 0 0
62165 - 0 0 0 0 0 0 0 0 0 0 0 0
62166 - 0 0 0 0 0 0 0 0 0 0 0 0
62167 - 0 0 0 0 0 0 0 0 0 0 0 0
62168 - 0 0 0 0 0 0 0 0 0 0 0 0
62169 - 0 0 0 0 0 0 0 0 0 14 14 14
62170 - 42 42 42 86 86 86 6 6 6 116 116 116
62171 -106 106 106 6 6 6 70 70 70 149 149 149
62172 -128 128 128 18 18 18 38 38 38 54 54 54
62173 -221 221 221 106 106 106 2 2 6 14 14 14
62174 - 46 46 46 190 190 190 198 198 198 2 2 6
62175 - 2 2 6 2 2 6 2 2 6 2 2 6
62176 - 74 74 74 62 62 62 22 22 22 6 6 6
62177 - 0 0 0 0 0 0 0 0 0 0 0 0
62178 - 0 0 0 0 0 0 0 0 0 0 0 0
62179 - 0 0 0 0 0 0 0 0 0 0 0 0
62180 - 0 0 0 0 0 0 0 0 0 0 0 0
62181 - 0 0 0 0 0 0 0 0 0 0 0 0
62182 - 0 0 0 0 0 0 0 0 0 0 0 0
62183 - 0 0 0 0 0 0 0 0 1 0 0 0
62184 - 0 0 1 0 0 0 0 0 1 0 0 0
62185 - 0 0 0 0 0 0 0 0 0 0 0 0
62186 - 0 0 0 0 0 0 0 0 0 0 0 0
62187 - 0 0 0 0 0 0 0 0 0 0 0 0
62188 - 0 0 0 0 0 0 0 0 0 0 0 0
62189 - 0 0 0 0 0 0 0 0 0 14 14 14
62190 - 42 42 42 94 94 94 14 14 14 101 101 101
62191 -128 128 128 2 2 6 18 18 18 116 116 116
62192 -118 98 46 121 92 8 121 92 8 98 78 10
62193 -162 162 162 106 106 106 2 2 6 2 2 6
62194 - 2 2 6 195 195 195 195 195 195 6 6 6
62195 - 2 2 6 2 2 6 2 2 6 2 2 6
62196 - 74 74 74 62 62 62 22 22 22 6 6 6
62197 - 0 0 0 0 0 0 0 0 0 0 0 0
62198 - 0 0 0 0 0 0 0 0 0 0 0 0
62199 - 0 0 0 0 0 0 0 0 0 0 0 0
62200 - 0 0 0 0 0 0 0 0 0 0 0 0
62201 - 0 0 0 0 0 0 0 0 0 0 0 0
62202 - 0 0 0 0 0 0 0 0 0 0 0 0
62203 - 0 0 0 0 0 0 0 0 1 0 0 1
62204 - 0 0 1 0 0 0 0 0 1 0 0 0
62205 - 0 0 0 0 0 0 0 0 0 0 0 0
62206 - 0 0 0 0 0 0 0 0 0 0 0 0
62207 - 0 0 0 0 0 0 0 0 0 0 0 0
62208 - 0 0 0 0 0 0 0 0 0 0 0 0
62209 - 0 0 0 0 0 0 0 0 0 10 10 10
62210 - 38 38 38 90 90 90 14 14 14 58 58 58
62211 -210 210 210 26 26 26 54 38 6 154 114 10
62212 -226 170 11 236 186 11 225 175 15 184 144 12
62213 -215 174 15 175 146 61 37 26 9 2 2 6
62214 - 70 70 70 246 246 246 138 138 138 2 2 6
62215 - 2 2 6 2 2 6 2 2 6 2 2 6
62216 - 70 70 70 66 66 66 26 26 26 6 6 6
62217 - 0 0 0 0 0 0 0 0 0 0 0 0
62218 - 0 0 0 0 0 0 0 0 0 0 0 0
62219 - 0 0 0 0 0 0 0 0 0 0 0 0
62220 - 0 0 0 0 0 0 0 0 0 0 0 0
62221 - 0 0 0 0 0 0 0 0 0 0 0 0
62222 - 0 0 0 0 0 0 0 0 0 0 0 0
62223 - 0 0 0 0 0 0 0 0 0 0 0 0
62224 - 0 0 0 0 0 0 0 0 0 0 0 0
62225 - 0 0 0 0 0 0 0 0 0 0 0 0
62226 - 0 0 0 0 0 0 0 0 0 0 0 0
62227 - 0 0 0 0 0 0 0 0 0 0 0 0
62228 - 0 0 0 0 0 0 0 0 0 0 0 0
62229 - 0 0 0 0 0 0 0 0 0 10 10 10
62230 - 38 38 38 86 86 86 14 14 14 10 10 10
62231 -195 195 195 188 164 115 192 133 9 225 175 15
62232 -239 182 13 234 190 10 232 195 16 232 200 30
62233 -245 207 45 241 208 19 232 195 16 184 144 12
62234 -218 194 134 211 206 186 42 42 42 2 2 6
62235 - 2 2 6 2 2 6 2 2 6 2 2 6
62236 - 50 50 50 74 74 74 30 30 30 6 6 6
62237 - 0 0 0 0 0 0 0 0 0 0 0 0
62238 - 0 0 0 0 0 0 0 0 0 0 0 0
62239 - 0 0 0 0 0 0 0 0 0 0 0 0
62240 - 0 0 0 0 0 0 0 0 0 0 0 0
62241 - 0 0 0 0 0 0 0 0 0 0 0 0
62242 - 0 0 0 0 0 0 0 0 0 0 0 0
62243 - 0 0 0 0 0 0 0 0 0 0 0 0
62244 - 0 0 0 0 0 0 0 0 0 0 0 0
62245 - 0 0 0 0 0 0 0 0 0 0 0 0
62246 - 0 0 0 0 0 0 0 0 0 0 0 0
62247 - 0 0 0 0 0 0 0 0 0 0 0 0
62248 - 0 0 0 0 0 0 0 0 0 0 0 0
62249 - 0 0 0 0 0 0 0 0 0 10 10 10
62250 - 34 34 34 86 86 86 14 14 14 2 2 6
62251 -121 87 25 192 133 9 219 162 10 239 182 13
62252 -236 186 11 232 195 16 241 208 19 244 214 54
62253 -246 218 60 246 218 38 246 215 20 241 208 19
62254 -241 208 19 226 184 13 121 87 25 2 2 6
62255 - 2 2 6 2 2 6 2 2 6 2 2 6
62256 - 50 50 50 82 82 82 34 34 34 10 10 10
62257 - 0 0 0 0 0 0 0 0 0 0 0 0
62258 - 0 0 0 0 0 0 0 0 0 0 0 0
62259 - 0 0 0 0 0 0 0 0 0 0 0 0
62260 - 0 0 0 0 0 0 0 0 0 0 0 0
62261 - 0 0 0 0 0 0 0 0 0 0 0 0
62262 - 0 0 0 0 0 0 0 0 0 0 0 0
62263 - 0 0 0 0 0 0 0 0 0 0 0 0
62264 - 0 0 0 0 0 0 0 0 0 0 0 0
62265 - 0 0 0 0 0 0 0 0 0 0 0 0
62266 - 0 0 0 0 0 0 0 0 0 0 0 0
62267 - 0 0 0 0 0 0 0 0 0 0 0 0
62268 - 0 0 0 0 0 0 0 0 0 0 0 0
62269 - 0 0 0 0 0 0 0 0 0 10 10 10
62270 - 34 34 34 82 82 82 30 30 30 61 42 6
62271 -180 123 7 206 145 10 230 174 11 239 182 13
62272 -234 190 10 238 202 15 241 208 19 246 218 74
62273 -246 218 38 246 215 20 246 215 20 246 215 20
62274 -226 184 13 215 174 15 184 144 12 6 6 6
62275 - 2 2 6 2 2 6 2 2 6 2 2 6
62276 - 26 26 26 94 94 94 42 42 42 14 14 14
62277 - 0 0 0 0 0 0 0 0 0 0 0 0
62278 - 0 0 0 0 0 0 0 0 0 0 0 0
62279 - 0 0 0 0 0 0 0 0 0 0 0 0
62280 - 0 0 0 0 0 0 0 0 0 0 0 0
62281 - 0 0 0 0 0 0 0 0 0 0 0 0
62282 - 0 0 0 0 0 0 0 0 0 0 0 0
62283 - 0 0 0 0 0 0 0 0 0 0 0 0
62284 - 0 0 0 0 0 0 0 0 0 0 0 0
62285 - 0 0 0 0 0 0 0 0 0 0 0 0
62286 - 0 0 0 0 0 0 0 0 0 0 0 0
62287 - 0 0 0 0 0 0 0 0 0 0 0 0
62288 - 0 0 0 0 0 0 0 0 0 0 0 0
62289 - 0 0 0 0 0 0 0 0 0 10 10 10
62290 - 30 30 30 78 78 78 50 50 50 104 69 6
62291 -192 133 9 216 158 10 236 178 12 236 186 11
62292 -232 195 16 241 208 19 244 214 54 245 215 43
62293 -246 215 20 246 215 20 241 208 19 198 155 10
62294 -200 144 11 216 158 10 156 118 10 2 2 6
62295 - 2 2 6 2 2 6 2 2 6 2 2 6
62296 - 6 6 6 90 90 90 54 54 54 18 18 18
62297 - 6 6 6 0 0 0 0 0 0 0 0 0
62298 - 0 0 0 0 0 0 0 0 0 0 0 0
62299 - 0 0 0 0 0 0 0 0 0 0 0 0
62300 - 0 0 0 0 0 0 0 0 0 0 0 0
62301 - 0 0 0 0 0 0 0 0 0 0 0 0
62302 - 0 0 0 0 0 0 0 0 0 0 0 0
62303 - 0 0 0 0 0 0 0 0 0 0 0 0
62304 - 0 0 0 0 0 0 0 0 0 0 0 0
62305 - 0 0 0 0 0 0 0 0 0 0 0 0
62306 - 0 0 0 0 0 0 0 0 0 0 0 0
62307 - 0 0 0 0 0 0 0 0 0 0 0 0
62308 - 0 0 0 0 0 0 0 0 0 0 0 0
62309 - 0 0 0 0 0 0 0 0 0 10 10 10
62310 - 30 30 30 78 78 78 46 46 46 22 22 22
62311 -137 92 6 210 162 10 239 182 13 238 190 10
62312 -238 202 15 241 208 19 246 215 20 246 215 20
62313 -241 208 19 203 166 17 185 133 11 210 150 10
62314 -216 158 10 210 150 10 102 78 10 2 2 6
62315 - 6 6 6 54 54 54 14 14 14 2 2 6
62316 - 2 2 6 62 62 62 74 74 74 30 30 30
62317 - 10 10 10 0 0 0 0 0 0 0 0 0
62318 - 0 0 0 0 0 0 0 0 0 0 0 0
62319 - 0 0 0 0 0 0 0 0 0 0 0 0
62320 - 0 0 0 0 0 0 0 0 0 0 0 0
62321 - 0 0 0 0 0 0 0 0 0 0 0 0
62322 - 0 0 0 0 0 0 0 0 0 0 0 0
62323 - 0 0 0 0 0 0 0 0 0 0 0 0
62324 - 0 0 0 0 0 0 0 0 0 0 0 0
62325 - 0 0 0 0 0 0 0 0 0 0 0 0
62326 - 0 0 0 0 0 0 0 0 0 0 0 0
62327 - 0 0 0 0 0 0 0 0 0 0 0 0
62328 - 0 0 0 0 0 0 0 0 0 0 0 0
62329 - 0 0 0 0 0 0 0 0 0 10 10 10
62330 - 34 34 34 78 78 78 50 50 50 6 6 6
62331 - 94 70 30 139 102 15 190 146 13 226 184 13
62332 -232 200 30 232 195 16 215 174 15 190 146 13
62333 -168 122 10 192 133 9 210 150 10 213 154 11
62334 -202 150 34 182 157 106 101 98 89 2 2 6
62335 - 2 2 6 78 78 78 116 116 116 58 58 58
62336 - 2 2 6 22 22 22 90 90 90 46 46 46
62337 - 18 18 18 6 6 6 0 0 0 0 0 0
62338 - 0 0 0 0 0 0 0 0 0 0 0 0
62339 - 0 0 0 0 0 0 0 0 0 0 0 0
62340 - 0 0 0 0 0 0 0 0 0 0 0 0
62341 - 0 0 0 0 0 0 0 0 0 0 0 0
62342 - 0 0 0 0 0 0 0 0 0 0 0 0
62343 - 0 0 0 0 0 0 0 0 0 0 0 0
62344 - 0 0 0 0 0 0 0 0 0 0 0 0
62345 - 0 0 0 0 0 0 0 0 0 0 0 0
62346 - 0 0 0 0 0 0 0 0 0 0 0 0
62347 - 0 0 0 0 0 0 0 0 0 0 0 0
62348 - 0 0 0 0 0 0 0 0 0 0 0 0
62349 - 0 0 0 0 0 0 0 0 0 10 10 10
62350 - 38 38 38 86 86 86 50 50 50 6 6 6
62351 -128 128 128 174 154 114 156 107 11 168 122 10
62352 -198 155 10 184 144 12 197 138 11 200 144 11
62353 -206 145 10 206 145 10 197 138 11 188 164 115
62354 -195 195 195 198 198 198 174 174 174 14 14 14
62355 - 2 2 6 22 22 22 116 116 116 116 116 116
62356 - 22 22 22 2 2 6 74 74 74 70 70 70
62357 - 30 30 30 10 10 10 0 0 0 0 0 0
62358 - 0 0 0 0 0 0 0 0 0 0 0 0
62359 - 0 0 0 0 0 0 0 0 0 0 0 0
62360 - 0 0 0 0 0 0 0 0 0 0 0 0
62361 - 0 0 0 0 0 0 0 0 0 0 0 0
62362 - 0 0 0 0 0 0 0 0 0 0 0 0
62363 - 0 0 0 0 0 0 0 0 0 0 0 0
62364 - 0 0 0 0 0 0 0 0 0 0 0 0
62365 - 0 0 0 0 0 0 0 0 0 0 0 0
62366 - 0 0 0 0 0 0 0 0 0 0 0 0
62367 - 0 0 0 0 0 0 0 0 0 0 0 0
62368 - 0 0 0 0 0 0 0 0 0 0 0 0
62369 - 0 0 0 0 0 0 6 6 6 18 18 18
62370 - 50 50 50 101 101 101 26 26 26 10 10 10
62371 -138 138 138 190 190 190 174 154 114 156 107 11
62372 -197 138 11 200 144 11 197 138 11 192 133 9
62373 -180 123 7 190 142 34 190 178 144 187 187 187
62374 -202 202 202 221 221 221 214 214 214 66 66 66
62375 - 2 2 6 2 2 6 50 50 50 62 62 62
62376 - 6 6 6 2 2 6 10 10 10 90 90 90
62377 - 50 50 50 18 18 18 6 6 6 0 0 0
62378 - 0 0 0 0 0 0 0 0 0 0 0 0
62379 - 0 0 0 0 0 0 0 0 0 0 0 0
62380 - 0 0 0 0 0 0 0 0 0 0 0 0
62381 - 0 0 0 0 0 0 0 0 0 0 0 0
62382 - 0 0 0 0 0 0 0 0 0 0 0 0
62383 - 0 0 0 0 0 0 0 0 0 0 0 0
62384 - 0 0 0 0 0 0 0 0 0 0 0 0
62385 - 0 0 0 0 0 0 0 0 0 0 0 0
62386 - 0 0 0 0 0 0 0 0 0 0 0 0
62387 - 0 0 0 0 0 0 0 0 0 0 0 0
62388 - 0 0 0 0 0 0 0 0 0 0 0 0
62389 - 0 0 0 0 0 0 10 10 10 34 34 34
62390 - 74 74 74 74 74 74 2 2 6 6 6 6
62391 -144 144 144 198 198 198 190 190 190 178 166 146
62392 -154 121 60 156 107 11 156 107 11 168 124 44
62393 -174 154 114 187 187 187 190 190 190 210 210 210
62394 -246 246 246 253 253 253 253 253 253 182 182 182
62395 - 6 6 6 2 2 6 2 2 6 2 2 6
62396 - 2 2 6 2 2 6 2 2 6 62 62 62
62397 - 74 74 74 34 34 34 14 14 14 0 0 0
62398 - 0 0 0 0 0 0 0 0 0 0 0 0
62399 - 0 0 0 0 0 0 0 0 0 0 0 0
62400 - 0 0 0 0 0 0 0 0 0 0 0 0
62401 - 0 0 0 0 0 0 0 0 0 0 0 0
62402 - 0 0 0 0 0 0 0 0 0 0 0 0
62403 - 0 0 0 0 0 0 0 0 0 0 0 0
62404 - 0 0 0 0 0 0 0 0 0 0 0 0
62405 - 0 0 0 0 0 0 0 0 0 0 0 0
62406 - 0 0 0 0 0 0 0 0 0 0 0 0
62407 - 0 0 0 0 0 0 0 0 0 0 0 0
62408 - 0 0 0 0 0 0 0 0 0 0 0 0
62409 - 0 0 0 10 10 10 22 22 22 54 54 54
62410 - 94 94 94 18 18 18 2 2 6 46 46 46
62411 -234 234 234 221 221 221 190 190 190 190 190 190
62412 -190 190 190 187 187 187 187 187 187 190 190 190
62413 -190 190 190 195 195 195 214 214 214 242 242 242
62414 -253 253 253 253 253 253 253 253 253 253 253 253
62415 - 82 82 82 2 2 6 2 2 6 2 2 6
62416 - 2 2 6 2 2 6 2 2 6 14 14 14
62417 - 86 86 86 54 54 54 22 22 22 6 6 6
62418 - 0 0 0 0 0 0 0 0 0 0 0 0
62419 - 0 0 0 0 0 0 0 0 0 0 0 0
62420 - 0 0 0 0 0 0 0 0 0 0 0 0
62421 - 0 0 0 0 0 0 0 0 0 0 0 0
62422 - 0 0 0 0 0 0 0 0 0 0 0 0
62423 - 0 0 0 0 0 0 0 0 0 0 0 0
62424 - 0 0 0 0 0 0 0 0 0 0 0 0
62425 - 0 0 0 0 0 0 0 0 0 0 0 0
62426 - 0 0 0 0 0 0 0 0 0 0 0 0
62427 - 0 0 0 0 0 0 0 0 0 0 0 0
62428 - 0 0 0 0 0 0 0 0 0 0 0 0
62429 - 6 6 6 18 18 18 46 46 46 90 90 90
62430 - 46 46 46 18 18 18 6 6 6 182 182 182
62431 -253 253 253 246 246 246 206 206 206 190 190 190
62432 -190 190 190 190 190 190 190 190 190 190 190 190
62433 -206 206 206 231 231 231 250 250 250 253 253 253
62434 -253 253 253 253 253 253 253 253 253 253 253 253
62435 -202 202 202 14 14 14 2 2 6 2 2 6
62436 - 2 2 6 2 2 6 2 2 6 2 2 6
62437 - 42 42 42 86 86 86 42 42 42 18 18 18
62438 - 6 6 6 0 0 0 0 0 0 0 0 0
62439 - 0 0 0 0 0 0 0 0 0 0 0 0
62440 - 0 0 0 0 0 0 0 0 0 0 0 0
62441 - 0 0 0 0 0 0 0 0 0 0 0 0
62442 - 0 0 0 0 0 0 0 0 0 0 0 0
62443 - 0 0 0 0 0 0 0 0 0 0 0 0
62444 - 0 0 0 0 0 0 0 0 0 0 0 0
62445 - 0 0 0 0 0 0 0 0 0 0 0 0
62446 - 0 0 0 0 0 0 0 0 0 0 0 0
62447 - 0 0 0 0 0 0 0 0 0 0 0 0
62448 - 0 0 0 0 0 0 0 0 0 6 6 6
62449 - 14 14 14 38 38 38 74 74 74 66 66 66
62450 - 2 2 6 6 6 6 90 90 90 250 250 250
62451 -253 253 253 253 253 253 238 238 238 198 198 198
62452 -190 190 190 190 190 190 195 195 195 221 221 221
62453 -246 246 246 253 253 253 253 253 253 253 253 253
62454 -253 253 253 253 253 253 253 253 253 253 253 253
62455 -253 253 253 82 82 82 2 2 6 2 2 6
62456 - 2 2 6 2 2 6 2 2 6 2 2 6
62457 - 2 2 6 78 78 78 70 70 70 34 34 34
62458 - 14 14 14 6 6 6 0 0 0 0 0 0
62459 - 0 0 0 0 0 0 0 0 0 0 0 0
62460 - 0 0 0 0 0 0 0 0 0 0 0 0
62461 - 0 0 0 0 0 0 0 0 0 0 0 0
62462 - 0 0 0 0 0 0 0 0 0 0 0 0
62463 - 0 0 0 0 0 0 0 0 0 0 0 0
62464 - 0 0 0 0 0 0 0 0 0 0 0 0
62465 - 0 0 0 0 0 0 0 0 0 0 0 0
62466 - 0 0 0 0 0 0 0 0 0 0 0 0
62467 - 0 0 0 0 0 0 0 0 0 0 0 0
62468 - 0 0 0 0 0 0 0 0 0 14 14 14
62469 - 34 34 34 66 66 66 78 78 78 6 6 6
62470 - 2 2 6 18 18 18 218 218 218 253 253 253
62471 -253 253 253 253 253 253 253 253 253 246 246 246
62472 -226 226 226 231 231 231 246 246 246 253 253 253
62473 -253 253 253 253 253 253 253 253 253 253 253 253
62474 -253 253 253 253 253 253 253 253 253 253 253 253
62475 -253 253 253 178 178 178 2 2 6 2 2 6
62476 - 2 2 6 2 2 6 2 2 6 2 2 6
62477 - 2 2 6 18 18 18 90 90 90 62 62 62
62478 - 30 30 30 10 10 10 0 0 0 0 0 0
62479 - 0 0 0 0 0 0 0 0 0 0 0 0
62480 - 0 0 0 0 0 0 0 0 0 0 0 0
62481 - 0 0 0 0 0 0 0 0 0 0 0 0
62482 - 0 0 0 0 0 0 0 0 0 0 0 0
62483 - 0 0 0 0 0 0 0 0 0 0 0 0
62484 - 0 0 0 0 0 0 0 0 0 0 0 0
62485 - 0 0 0 0 0 0 0 0 0 0 0 0
62486 - 0 0 0 0 0 0 0 0 0 0 0 0
62487 - 0 0 0 0 0 0 0 0 0 0 0 0
62488 - 0 0 0 0 0 0 10 10 10 26 26 26
62489 - 58 58 58 90 90 90 18 18 18 2 2 6
62490 - 2 2 6 110 110 110 253 253 253 253 253 253
62491 -253 253 253 253 253 253 253 253 253 253 253 253
62492 -250 250 250 253 253 253 253 253 253 253 253 253
62493 -253 253 253 253 253 253 253 253 253 253 253 253
62494 -253 253 253 253 253 253 253 253 253 253 253 253
62495 -253 253 253 231 231 231 18 18 18 2 2 6
62496 - 2 2 6 2 2 6 2 2 6 2 2 6
62497 - 2 2 6 2 2 6 18 18 18 94 94 94
62498 - 54 54 54 26 26 26 10 10 10 0 0 0
62499 - 0 0 0 0 0 0 0 0 0 0 0 0
62500 - 0 0 0 0 0 0 0 0 0 0 0 0
62501 - 0 0 0 0 0 0 0 0 0 0 0 0
62502 - 0 0 0 0 0 0 0 0 0 0 0 0
62503 - 0 0 0 0 0 0 0 0 0 0 0 0
62504 - 0 0 0 0 0 0 0 0 0 0 0 0
62505 - 0 0 0 0 0 0 0 0 0 0 0 0
62506 - 0 0 0 0 0 0 0 0 0 0 0 0
62507 - 0 0 0 0 0 0 0 0 0 0 0 0
62508 - 0 0 0 6 6 6 22 22 22 50 50 50
62509 - 90 90 90 26 26 26 2 2 6 2 2 6
62510 - 14 14 14 195 195 195 250 250 250 253 253 253
62511 -253 253 253 253 253 253 253 253 253 253 253 253
62512 -253 253 253 253 253 253 253 253 253 253 253 253
62513 -253 253 253 253 253 253 253 253 253 253 253 253
62514 -253 253 253 253 253 253 253 253 253 253 253 253
62515 -250 250 250 242 242 242 54 54 54 2 2 6
62516 - 2 2 6 2 2 6 2 2 6 2 2 6
62517 - 2 2 6 2 2 6 2 2 6 38 38 38
62518 - 86 86 86 50 50 50 22 22 22 6 6 6
62519 - 0 0 0 0 0 0 0 0 0 0 0 0
62520 - 0 0 0 0 0 0 0 0 0 0 0 0
62521 - 0 0 0 0 0 0 0 0 0 0 0 0
62522 - 0 0 0 0 0 0 0 0 0 0 0 0
62523 - 0 0 0 0 0 0 0 0 0 0 0 0
62524 - 0 0 0 0 0 0 0 0 0 0 0 0
62525 - 0 0 0 0 0 0 0 0 0 0 0 0
62526 - 0 0 0 0 0 0 0 0 0 0 0 0
62527 - 0 0 0 0 0 0 0 0 0 0 0 0
62528 - 6 6 6 14 14 14 38 38 38 82 82 82
62529 - 34 34 34 2 2 6 2 2 6 2 2 6
62530 - 42 42 42 195 195 195 246 246 246 253 253 253
62531 -253 253 253 253 253 253 253 253 253 250 250 250
62532 -242 242 242 242 242 242 250 250 250 253 253 253
62533 -253 253 253 253 253 253 253 253 253 253 253 253
62534 -253 253 253 250 250 250 246 246 246 238 238 238
62535 -226 226 226 231 231 231 101 101 101 6 6 6
62536 - 2 2 6 2 2 6 2 2 6 2 2 6
62537 - 2 2 6 2 2 6 2 2 6 2 2 6
62538 - 38 38 38 82 82 82 42 42 42 14 14 14
62539 - 6 6 6 0 0 0 0 0 0 0 0 0
62540 - 0 0 0 0 0 0 0 0 0 0 0 0
62541 - 0 0 0 0 0 0 0 0 0 0 0 0
62542 - 0 0 0 0 0 0 0 0 0 0 0 0
62543 - 0 0 0 0 0 0 0 0 0 0 0 0
62544 - 0 0 0 0 0 0 0 0 0 0 0 0
62545 - 0 0 0 0 0 0 0 0 0 0 0 0
62546 - 0 0 0 0 0 0 0 0 0 0 0 0
62547 - 0 0 0 0 0 0 0 0 0 0 0 0
62548 - 10 10 10 26 26 26 62 62 62 66 66 66
62549 - 2 2 6 2 2 6 2 2 6 6 6 6
62550 - 70 70 70 170 170 170 206 206 206 234 234 234
62551 -246 246 246 250 250 250 250 250 250 238 238 238
62552 -226 226 226 231 231 231 238 238 238 250 250 250
62553 -250 250 250 250 250 250 246 246 246 231 231 231
62554 -214 214 214 206 206 206 202 202 202 202 202 202
62555 -198 198 198 202 202 202 182 182 182 18 18 18
62556 - 2 2 6 2 2 6 2 2 6 2 2 6
62557 - 2 2 6 2 2 6 2 2 6 2 2 6
62558 - 2 2 6 62 62 62 66 66 66 30 30 30
62559 - 10 10 10 0 0 0 0 0 0 0 0 0
62560 - 0 0 0 0 0 0 0 0 0 0 0 0
62561 - 0 0 0 0 0 0 0 0 0 0 0 0
62562 - 0 0 0 0 0 0 0 0 0 0 0 0
62563 - 0 0 0 0 0 0 0 0 0 0 0 0
62564 - 0 0 0 0 0 0 0 0 0 0 0 0
62565 - 0 0 0 0 0 0 0 0 0 0 0 0
62566 - 0 0 0 0 0 0 0 0 0 0 0 0
62567 - 0 0 0 0 0 0 0 0 0 0 0 0
62568 - 14 14 14 42 42 42 82 82 82 18 18 18
62569 - 2 2 6 2 2 6 2 2 6 10 10 10
62570 - 94 94 94 182 182 182 218 218 218 242 242 242
62571 -250 250 250 253 253 253 253 253 253 250 250 250
62572 -234 234 234 253 253 253 253 253 253 253 253 253
62573 -253 253 253 253 253 253 253 253 253 246 246 246
62574 -238 238 238 226 226 226 210 210 210 202 202 202
62575 -195 195 195 195 195 195 210 210 210 158 158 158
62576 - 6 6 6 14 14 14 50 50 50 14 14 14
62577 - 2 2 6 2 2 6 2 2 6 2 2 6
62578 - 2 2 6 6 6 6 86 86 86 46 46 46
62579 - 18 18 18 6 6 6 0 0 0 0 0 0
62580 - 0 0 0 0 0 0 0 0 0 0 0 0
62581 - 0 0 0 0 0 0 0 0 0 0 0 0
62582 - 0 0 0 0 0 0 0 0 0 0 0 0
62583 - 0 0 0 0 0 0 0 0 0 0 0 0
62584 - 0 0 0 0 0 0 0 0 0 0 0 0
62585 - 0 0 0 0 0 0 0 0 0 0 0 0
62586 - 0 0 0 0 0 0 0 0 0 0 0 0
62587 - 0 0 0 0 0 0 0 0 0 6 6 6
62588 - 22 22 22 54 54 54 70 70 70 2 2 6
62589 - 2 2 6 10 10 10 2 2 6 22 22 22
62590 -166 166 166 231 231 231 250 250 250 253 253 253
62591 -253 253 253 253 253 253 253 253 253 250 250 250
62592 -242 242 242 253 253 253 253 253 253 253 253 253
62593 -253 253 253 253 253 253 253 253 253 253 253 253
62594 -253 253 253 253 253 253 253 253 253 246 246 246
62595 -231 231 231 206 206 206 198 198 198 226 226 226
62596 - 94 94 94 2 2 6 6 6 6 38 38 38
62597 - 30 30 30 2 2 6 2 2 6 2 2 6
62598 - 2 2 6 2 2 6 62 62 62 66 66 66
62599 - 26 26 26 10 10 10 0 0 0 0 0 0
62600 - 0 0 0 0 0 0 0 0 0 0 0 0
62601 - 0 0 0 0 0 0 0 0 0 0 0 0
62602 - 0 0 0 0 0 0 0 0 0 0 0 0
62603 - 0 0 0 0 0 0 0 0 0 0 0 0
62604 - 0 0 0 0 0 0 0 0 0 0 0 0
62605 - 0 0 0 0 0 0 0 0 0 0 0 0
62606 - 0 0 0 0 0 0 0 0 0 0 0 0
62607 - 0 0 0 0 0 0 0 0 0 10 10 10
62608 - 30 30 30 74 74 74 50 50 50 2 2 6
62609 - 26 26 26 26 26 26 2 2 6 106 106 106
62610 -238 238 238 253 253 253 253 253 253 253 253 253
62611 -253 253 253 253 253 253 253 253 253 253 253 253
62612 -253 253 253 253 253 253 253 253 253 253 253 253
62613 -253 253 253 253 253 253 253 253 253 253 253 253
62614 -253 253 253 253 253 253 253 253 253 253 253 253
62615 -253 253 253 246 246 246 218 218 218 202 202 202
62616 -210 210 210 14 14 14 2 2 6 2 2 6
62617 - 30 30 30 22 22 22 2 2 6 2 2 6
62618 - 2 2 6 2 2 6 18 18 18 86 86 86
62619 - 42 42 42 14 14 14 0 0 0 0 0 0
62620 - 0 0 0 0 0 0 0 0 0 0 0 0
62621 - 0 0 0 0 0 0 0 0 0 0 0 0
62622 - 0 0 0 0 0 0 0 0 0 0 0 0
62623 - 0 0 0 0 0 0 0 0 0 0 0 0
62624 - 0 0 0 0 0 0 0 0 0 0 0 0
62625 - 0 0 0 0 0 0 0 0 0 0 0 0
62626 - 0 0 0 0 0 0 0 0 0 0 0 0
62627 - 0 0 0 0 0 0 0 0 0 14 14 14
62628 - 42 42 42 90 90 90 22 22 22 2 2 6
62629 - 42 42 42 2 2 6 18 18 18 218 218 218
62630 -253 253 253 253 253 253 253 253 253 253 253 253
62631 -253 253 253 253 253 253 253 253 253 253 253 253
62632 -253 253 253 253 253 253 253 253 253 253 253 253
62633 -253 253 253 253 253 253 253 253 253 253 253 253
62634 -253 253 253 253 253 253 253 253 253 253 253 253
62635 -253 253 253 253 253 253 250 250 250 221 221 221
62636 -218 218 218 101 101 101 2 2 6 14 14 14
62637 - 18 18 18 38 38 38 10 10 10 2 2 6
62638 - 2 2 6 2 2 6 2 2 6 78 78 78
62639 - 58 58 58 22 22 22 6 6 6 0 0 0
62640 - 0 0 0 0 0 0 0 0 0 0 0 0
62641 - 0 0 0 0 0 0 0 0 0 0 0 0
62642 - 0 0 0 0 0 0 0 0 0 0 0 0
62643 - 0 0 0 0 0 0 0 0 0 0 0 0
62644 - 0 0 0 0 0 0 0 0 0 0 0 0
62645 - 0 0 0 0 0 0 0 0 0 0 0 0
62646 - 0 0 0 0 0 0 0 0 0 0 0 0
62647 - 0 0 0 0 0 0 6 6 6 18 18 18
62648 - 54 54 54 82 82 82 2 2 6 26 26 26
62649 - 22 22 22 2 2 6 123 123 123 253 253 253
62650 -253 253 253 253 253 253 253 253 253 253 253 253
62651 -253 253 253 253 253 253 253 253 253 253 253 253
62652 -253 253 253 253 253 253 253 253 253 253 253 253
62653 -253 253 253 253 253 253 253 253 253 253 253 253
62654 -253 253 253 253 253 253 253 253 253 253 253 253
62655 -253 253 253 253 253 253 253 253 253 250 250 250
62656 -238 238 238 198 198 198 6 6 6 38 38 38
62657 - 58 58 58 26 26 26 38 38 38 2 2 6
62658 - 2 2 6 2 2 6 2 2 6 46 46 46
62659 - 78 78 78 30 30 30 10 10 10 0 0 0
62660 - 0 0 0 0 0 0 0 0 0 0 0 0
62661 - 0 0 0 0 0 0 0 0 0 0 0 0
62662 - 0 0 0 0 0 0 0 0 0 0 0 0
62663 - 0 0 0 0 0 0 0 0 0 0 0 0
62664 - 0 0 0 0 0 0 0 0 0 0 0 0
62665 - 0 0 0 0 0 0 0 0 0 0 0 0
62666 - 0 0 0 0 0 0 0 0 0 0 0 0
62667 - 0 0 0 0 0 0 10 10 10 30 30 30
62668 - 74 74 74 58 58 58 2 2 6 42 42 42
62669 - 2 2 6 22 22 22 231 231 231 253 253 253
62670 -253 253 253 253 253 253 253 253 253 253 253 253
62671 -253 253 253 253 253 253 253 253 253 250 250 250
62672 -253 253 253 253 253 253 253 253 253 253 253 253
62673 -253 253 253 253 253 253 253 253 253 253 253 253
62674 -253 253 253 253 253 253 253 253 253 253 253 253
62675 -253 253 253 253 253 253 253 253 253 253 253 253
62676 -253 253 253 246 246 246 46 46 46 38 38 38
62677 - 42 42 42 14 14 14 38 38 38 14 14 14
62678 - 2 2 6 2 2 6 2 2 6 6 6 6
62679 - 86 86 86 46 46 46 14 14 14 0 0 0
62680 - 0 0 0 0 0 0 0 0 0 0 0 0
62681 - 0 0 0 0 0 0 0 0 0 0 0 0
62682 - 0 0 0 0 0 0 0 0 0 0 0 0
62683 - 0 0 0 0 0 0 0 0 0 0 0 0
62684 - 0 0 0 0 0 0 0 0 0 0 0 0
62685 - 0 0 0 0 0 0 0 0 0 0 0 0
62686 - 0 0 0 0 0 0 0 0 0 0 0 0
62687 - 0 0 0 6 6 6 14 14 14 42 42 42
62688 - 90 90 90 18 18 18 18 18 18 26 26 26
62689 - 2 2 6 116 116 116 253 253 253 253 253 253
62690 -253 253 253 253 253 253 253 253 253 253 253 253
62691 -253 253 253 253 253 253 250 250 250 238 238 238
62692 -253 253 253 253 253 253 253 253 253 253 253 253
62693 -253 253 253 253 253 253 253 253 253 253 253 253
62694 -253 253 253 253 253 253 253 253 253 253 253 253
62695 -253 253 253 253 253 253 253 253 253 253 253 253
62696 -253 253 253 253 253 253 94 94 94 6 6 6
62697 - 2 2 6 2 2 6 10 10 10 34 34 34
62698 - 2 2 6 2 2 6 2 2 6 2 2 6
62699 - 74 74 74 58 58 58 22 22 22 6 6 6
62700 - 0 0 0 0 0 0 0 0 0 0 0 0
62701 - 0 0 0 0 0 0 0 0 0 0 0 0
62702 - 0 0 0 0 0 0 0 0 0 0 0 0
62703 - 0 0 0 0 0 0 0 0 0 0 0 0
62704 - 0 0 0 0 0 0 0 0 0 0 0 0
62705 - 0 0 0 0 0 0 0 0 0 0 0 0
62706 - 0 0 0 0 0 0 0 0 0 0 0 0
62707 - 0 0 0 10 10 10 26 26 26 66 66 66
62708 - 82 82 82 2 2 6 38 38 38 6 6 6
62709 - 14 14 14 210 210 210 253 253 253 253 253 253
62710 -253 253 253 253 253 253 253 253 253 253 253 253
62711 -253 253 253 253 253 253 246 246 246 242 242 242
62712 -253 253 253 253 253 253 253 253 253 253 253 253
62713 -253 253 253 253 253 253 253 253 253 253 253 253
62714 -253 253 253 253 253 253 253 253 253 253 253 253
62715 -253 253 253 253 253 253 253 253 253 253 253 253
62716 -253 253 253 253 253 253 144 144 144 2 2 6
62717 - 2 2 6 2 2 6 2 2 6 46 46 46
62718 - 2 2 6 2 2 6 2 2 6 2 2 6
62719 - 42 42 42 74 74 74 30 30 30 10 10 10
62720 - 0 0 0 0 0 0 0 0 0 0 0 0
62721 - 0 0 0 0 0 0 0 0 0 0 0 0
62722 - 0 0 0 0 0 0 0 0 0 0 0 0
62723 - 0 0 0 0 0 0 0 0 0 0 0 0
62724 - 0 0 0 0 0 0 0 0 0 0 0 0
62725 - 0 0 0 0 0 0 0 0 0 0 0 0
62726 - 0 0 0 0 0 0 0 0 0 0 0 0
62727 - 6 6 6 14 14 14 42 42 42 90 90 90
62728 - 26 26 26 6 6 6 42 42 42 2 2 6
62729 - 74 74 74 250 250 250 253 253 253 253 253 253
62730 -253 253 253 253 253 253 253 253 253 253 253 253
62731 -253 253 253 253 253 253 242 242 242 242 242 242
62732 -253 253 253 253 253 253 253 253 253 253 253 253
62733 -253 253 253 253 253 253 253 253 253 253 253 253
62734 -253 253 253 253 253 253 253 253 253 253 253 253
62735 -253 253 253 253 253 253 253 253 253 253 253 253
62736 -253 253 253 253 253 253 182 182 182 2 2 6
62737 - 2 2 6 2 2 6 2 2 6 46 46 46
62738 - 2 2 6 2 2 6 2 2 6 2 2 6
62739 - 10 10 10 86 86 86 38 38 38 10 10 10
62740 - 0 0 0 0 0 0 0 0 0 0 0 0
62741 - 0 0 0 0 0 0 0 0 0 0 0 0
62742 - 0 0 0 0 0 0 0 0 0 0 0 0
62743 - 0 0 0 0 0 0 0 0 0 0 0 0
62744 - 0 0 0 0 0 0 0 0 0 0 0 0
62745 - 0 0 0 0 0 0 0 0 0 0 0 0
62746 - 0 0 0 0 0 0 0 0 0 0 0 0
62747 - 10 10 10 26 26 26 66 66 66 82 82 82
62748 - 2 2 6 22 22 22 18 18 18 2 2 6
62749 -149 149 149 253 253 253 253 253 253 253 253 253
62750 -253 253 253 253 253 253 253 253 253 253 253 253
62751 -253 253 253 253 253 253 234 234 234 242 242 242
62752 -253 253 253 253 253 253 253 253 253 253 253 253
62753 -253 253 253 253 253 253 253 253 253 253 253 253
62754 -253 253 253 253 253 253 253 253 253 253 253 253
62755 -253 253 253 253 253 253 253 253 253 253 253 253
62756 -253 253 253 253 253 253 206 206 206 2 2 6
62757 - 2 2 6 2 2 6 2 2 6 38 38 38
62758 - 2 2 6 2 2 6 2 2 6 2 2 6
62759 - 6 6 6 86 86 86 46 46 46 14 14 14
62760 - 0 0 0 0 0 0 0 0 0 0 0 0
62761 - 0 0 0 0 0 0 0 0 0 0 0 0
62762 - 0 0 0 0 0 0 0 0 0 0 0 0
62763 - 0 0 0 0 0 0 0 0 0 0 0 0
62764 - 0 0 0 0 0 0 0 0 0 0 0 0
62765 - 0 0 0 0 0 0 0 0 0 0 0 0
62766 - 0 0 0 0 0 0 0 0 0 6 6 6
62767 - 18 18 18 46 46 46 86 86 86 18 18 18
62768 - 2 2 6 34 34 34 10 10 10 6 6 6
62769 -210 210 210 253 253 253 253 253 253 253 253 253
62770 -253 253 253 253 253 253 253 253 253 253 253 253
62771 -253 253 253 253 253 253 234 234 234 242 242 242
62772 -253 253 253 253 253 253 253 253 253 253 253 253
62773 -253 253 253 253 253 253 253 253 253 253 253 253
62774 -253 253 253 253 253 253 253 253 253 253 253 253
62775 -253 253 253 253 253 253 253 253 253 253 253 253
62776 -253 253 253 253 253 253 221 221 221 6 6 6
62777 - 2 2 6 2 2 6 6 6 6 30 30 30
62778 - 2 2 6 2 2 6 2 2 6 2 2 6
62779 - 2 2 6 82 82 82 54 54 54 18 18 18
62780 - 6 6 6 0 0 0 0 0 0 0 0 0
62781 - 0 0 0 0 0 0 0 0 0 0 0 0
62782 - 0 0 0 0 0 0 0 0 0 0 0 0
62783 - 0 0 0 0 0 0 0 0 0 0 0 0
62784 - 0 0 0 0 0 0 0 0 0 0 0 0
62785 - 0 0 0 0 0 0 0 0 0 0 0 0
62786 - 0 0 0 0 0 0 0 0 0 10 10 10
62787 - 26 26 26 66 66 66 62 62 62 2 2 6
62788 - 2 2 6 38 38 38 10 10 10 26 26 26
62789 -238 238 238 253 253 253 253 253 253 253 253 253
62790 -253 253 253 253 253 253 253 253 253 253 253 253
62791 -253 253 253 253 253 253 231 231 231 238 238 238
62792 -253 253 253 253 253 253 253 253 253 253 253 253
62793 -253 253 253 253 253 253 253 253 253 253 253 253
62794 -253 253 253 253 253 253 253 253 253 253 253 253
62795 -253 253 253 253 253 253 253 253 253 253 253 253
62796 -253 253 253 253 253 253 231 231 231 6 6 6
62797 - 2 2 6 2 2 6 10 10 10 30 30 30
62798 - 2 2 6 2 2 6 2 2 6 2 2 6
62799 - 2 2 6 66 66 66 58 58 58 22 22 22
62800 - 6 6 6 0 0 0 0 0 0 0 0 0
62801 - 0 0 0 0 0 0 0 0 0 0 0 0
62802 - 0 0 0 0 0 0 0 0 0 0 0 0
62803 - 0 0 0 0 0 0 0 0 0 0 0 0
62804 - 0 0 0 0 0 0 0 0 0 0 0 0
62805 - 0 0 0 0 0 0 0 0 0 0 0 0
62806 - 0 0 0 0 0 0 0 0 0 10 10 10
62807 - 38 38 38 78 78 78 6 6 6 2 2 6
62808 - 2 2 6 46 46 46 14 14 14 42 42 42
62809 -246 246 246 253 253 253 253 253 253 253 253 253
62810 -253 253 253 253 253 253 253 253 253 253 253 253
62811 -253 253 253 253 253 253 231 231 231 242 242 242
62812 -253 253 253 253 253 253 253 253 253 253 253 253
62813 -253 253 253 253 253 253 253 253 253 253 253 253
62814 -253 253 253 253 253 253 253 253 253 253 253 253
62815 -253 253 253 253 253 253 253 253 253 253 253 253
62816 -253 253 253 253 253 253 234 234 234 10 10 10
62817 - 2 2 6 2 2 6 22 22 22 14 14 14
62818 - 2 2 6 2 2 6 2 2 6 2 2 6
62819 - 2 2 6 66 66 66 62 62 62 22 22 22
62820 - 6 6 6 0 0 0 0 0 0 0 0 0
62821 - 0 0 0 0 0 0 0 0 0 0 0 0
62822 - 0 0 0 0 0 0 0 0 0 0 0 0
62823 - 0 0 0 0 0 0 0 0 0 0 0 0
62824 - 0 0 0 0 0 0 0 0 0 0 0 0
62825 - 0 0 0 0 0 0 0 0 0 0 0 0
62826 - 0 0 0 0 0 0 6 6 6 18 18 18
62827 - 50 50 50 74 74 74 2 2 6 2 2 6
62828 - 14 14 14 70 70 70 34 34 34 62 62 62
62829 -250 250 250 253 253 253 253 253 253 253 253 253
62830 -253 253 253 253 253 253 253 253 253 253 253 253
62831 -253 253 253 253 253 253 231 231 231 246 246 246
62832 -253 253 253 253 253 253 253 253 253 253 253 253
62833 -253 253 253 253 253 253 253 253 253 253 253 253
62834 -253 253 253 253 253 253 253 253 253 253 253 253
62835 -253 253 253 253 253 253 253 253 253 253 253 253
62836 -253 253 253 253 253 253 234 234 234 14 14 14
62837 - 2 2 6 2 2 6 30 30 30 2 2 6
62838 - 2 2 6 2 2 6 2 2 6 2 2 6
62839 - 2 2 6 66 66 66 62 62 62 22 22 22
62840 - 6 6 6 0 0 0 0 0 0 0 0 0
62841 - 0 0 0 0 0 0 0 0 0 0 0 0
62842 - 0 0 0 0 0 0 0 0 0 0 0 0
62843 - 0 0 0 0 0 0 0 0 0 0 0 0
62844 - 0 0 0 0 0 0 0 0 0 0 0 0
62845 - 0 0 0 0 0 0 0 0 0 0 0 0
62846 - 0 0 0 0 0 0 6 6 6 18 18 18
62847 - 54 54 54 62 62 62 2 2 6 2 2 6
62848 - 2 2 6 30 30 30 46 46 46 70 70 70
62849 -250 250 250 253 253 253 253 253 253 253 253 253
62850 -253 253 253 253 253 253 253 253 253 253 253 253
62851 -253 253 253 253 253 253 231 231 231 246 246 246
62852 -253 253 253 253 253 253 253 253 253 253 253 253
62853 -253 253 253 253 253 253 253 253 253 253 253 253
62854 -253 253 253 253 253 253 253 253 253 253 253 253
62855 -253 253 253 253 253 253 253 253 253 253 253 253
62856 -253 253 253 253 253 253 226 226 226 10 10 10
62857 - 2 2 6 6 6 6 30 30 30 2 2 6
62858 - 2 2 6 2 2 6 2 2 6 2 2 6
62859 - 2 2 6 66 66 66 58 58 58 22 22 22
62860 - 6 6 6 0 0 0 0 0 0 0 0 0
62861 - 0 0 0 0 0 0 0 0 0 0 0 0
62862 - 0 0 0 0 0 0 0 0 0 0 0 0
62863 - 0 0 0 0 0 0 0 0 0 0 0 0
62864 - 0 0 0 0 0 0 0 0 0 0 0 0
62865 - 0 0 0 0 0 0 0 0 0 0 0 0
62866 - 0 0 0 0 0 0 6 6 6 22 22 22
62867 - 58 58 58 62 62 62 2 2 6 2 2 6
62868 - 2 2 6 2 2 6 30 30 30 78 78 78
62869 -250 250 250 253 253 253 253 253 253 253 253 253
62870 -253 253 253 253 253 253 253 253 253 253 253 253
62871 -253 253 253 253 253 253 231 231 231 246 246 246
62872 -253 253 253 253 253 253 253 253 253 253 253 253
62873 -253 253 253 253 253 253 253 253 253 253 253 253
62874 -253 253 253 253 253 253 253 253 253 253 253 253
62875 -253 253 253 253 253 253 253 253 253 253 253 253
62876 -253 253 253 253 253 253 206 206 206 2 2 6
62877 - 22 22 22 34 34 34 18 14 6 22 22 22
62878 - 26 26 26 18 18 18 6 6 6 2 2 6
62879 - 2 2 6 82 82 82 54 54 54 18 18 18
62880 - 6 6 6 0 0 0 0 0 0 0 0 0
62881 - 0 0 0 0 0 0 0 0 0 0 0 0
62882 - 0 0 0 0 0 0 0 0 0 0 0 0
62883 - 0 0 0 0 0 0 0 0 0 0 0 0
62884 - 0 0 0 0 0 0 0 0 0 0 0 0
62885 - 0 0 0 0 0 0 0 0 0 0 0 0
62886 - 0 0 0 0 0 0 6 6 6 26 26 26
62887 - 62 62 62 106 106 106 74 54 14 185 133 11
62888 -210 162 10 121 92 8 6 6 6 62 62 62
62889 -238 238 238 253 253 253 253 253 253 253 253 253
62890 -253 253 253 253 253 253 253 253 253 253 253 253
62891 -253 253 253 253 253 253 231 231 231 246 246 246
62892 -253 253 253 253 253 253 253 253 253 253 253 253
62893 -253 253 253 253 253 253 253 253 253 253 253 253
62894 -253 253 253 253 253 253 253 253 253 253 253 253
62895 -253 253 253 253 253 253 253 253 253 253 253 253
62896 -253 253 253 253 253 253 158 158 158 18 18 18
62897 - 14 14 14 2 2 6 2 2 6 2 2 6
62898 - 6 6 6 18 18 18 66 66 66 38 38 38
62899 - 6 6 6 94 94 94 50 50 50 18 18 18
62900 - 6 6 6 0 0 0 0 0 0 0 0 0
62901 - 0 0 0 0 0 0 0 0 0 0 0 0
62902 - 0 0 0 0 0 0 0 0 0 0 0 0
62903 - 0 0 0 0 0 0 0 0 0 0 0 0
62904 - 0 0 0 0 0 0 0 0 0 0 0 0
62905 - 0 0 0 0 0 0 0 0 0 6 6 6
62906 - 10 10 10 10 10 10 18 18 18 38 38 38
62907 - 78 78 78 142 134 106 216 158 10 242 186 14
62908 -246 190 14 246 190 14 156 118 10 10 10 10
62909 - 90 90 90 238 238 238 253 253 253 253 253 253
62910 -253 253 253 253 253 253 253 253 253 253 253 253
62911 -253 253 253 253 253 253 231 231 231 250 250 250
62912 -253 253 253 253 253 253 253 253 253 253 253 253
62913 -253 253 253 253 253 253 253 253 253 253 253 253
62914 -253 253 253 253 253 253 253 253 253 253 253 253
62915 -253 253 253 253 253 253 253 253 253 246 230 190
62916 -238 204 91 238 204 91 181 142 44 37 26 9
62917 - 2 2 6 2 2 6 2 2 6 2 2 6
62918 - 2 2 6 2 2 6 38 38 38 46 46 46
62919 - 26 26 26 106 106 106 54 54 54 18 18 18
62920 - 6 6 6 0 0 0 0 0 0 0 0 0
62921 - 0 0 0 0 0 0 0 0 0 0 0 0
62922 - 0 0 0 0 0 0 0 0 0 0 0 0
62923 - 0 0 0 0 0 0 0 0 0 0 0 0
62924 - 0 0 0 0 0 0 0 0 0 0 0 0
62925 - 0 0 0 6 6 6 14 14 14 22 22 22
62926 - 30 30 30 38 38 38 50 50 50 70 70 70
62927 -106 106 106 190 142 34 226 170 11 242 186 14
62928 -246 190 14 246 190 14 246 190 14 154 114 10
62929 - 6 6 6 74 74 74 226 226 226 253 253 253
62930 -253 253 253 253 253 253 253 253 253 253 253 253
62931 -253 253 253 253 253 253 231 231 231 250 250 250
62932 -253 253 253 253 253 253 253 253 253 253 253 253
62933 -253 253 253 253 253 253 253 253 253 253 253 253
62934 -253 253 253 253 253 253 253 253 253 253 253 253
62935 -253 253 253 253 253 253 253 253 253 228 184 62
62936 -241 196 14 241 208 19 232 195 16 38 30 10
62937 - 2 2 6 2 2 6 2 2 6 2 2 6
62938 - 2 2 6 6 6 6 30 30 30 26 26 26
62939 -203 166 17 154 142 90 66 66 66 26 26 26
62940 - 6 6 6 0 0 0 0 0 0 0 0 0
62941 - 0 0 0 0 0 0 0 0 0 0 0 0
62942 - 0 0 0 0 0 0 0 0 0 0 0 0
62943 - 0 0 0 0 0 0 0 0 0 0 0 0
62944 - 0 0 0 0 0 0 0 0 0 0 0 0
62945 - 6 6 6 18 18 18 38 38 38 58 58 58
62946 - 78 78 78 86 86 86 101 101 101 123 123 123
62947 -175 146 61 210 150 10 234 174 13 246 186 14
62948 -246 190 14 246 190 14 246 190 14 238 190 10
62949 -102 78 10 2 2 6 46 46 46 198 198 198
62950 -253 253 253 253 253 253 253 253 253 253 253 253
62951 -253 253 253 253 253 253 234 234 234 242 242 242
62952 -253 253 253 253 253 253 253 253 253 253 253 253
62953 -253 253 253 253 253 253 253 253 253 253 253 253
62954 -253 253 253 253 253 253 253 253 253 253 253 253
62955 -253 253 253 253 253 253 253 253 253 224 178 62
62956 -242 186 14 241 196 14 210 166 10 22 18 6
62957 - 2 2 6 2 2 6 2 2 6 2 2 6
62958 - 2 2 6 2 2 6 6 6 6 121 92 8
62959 -238 202 15 232 195 16 82 82 82 34 34 34
62960 - 10 10 10 0 0 0 0 0 0 0 0 0
62961 - 0 0 0 0 0 0 0 0 0 0 0 0
62962 - 0 0 0 0 0 0 0 0 0 0 0 0
62963 - 0 0 0 0 0 0 0 0 0 0 0 0
62964 - 0 0 0 0 0 0 0 0 0 0 0 0
62965 - 14 14 14 38 38 38 70 70 70 154 122 46
62966 -190 142 34 200 144 11 197 138 11 197 138 11
62967 -213 154 11 226 170 11 242 186 14 246 190 14
62968 -246 190 14 246 190 14 246 190 14 246 190 14
62969 -225 175 15 46 32 6 2 2 6 22 22 22
62970 -158 158 158 250 250 250 253 253 253 253 253 253
62971 -253 253 253 253 253 253 253 253 253 253 253 253
62972 -253 253 253 253 253 253 253 253 253 253 253 253
62973 -253 253 253 253 253 253 253 253 253 253 253 253
62974 -253 253 253 253 253 253 253 253 253 253 253 253
62975 -253 253 253 250 250 250 242 242 242 224 178 62
62976 -239 182 13 236 186 11 213 154 11 46 32 6
62977 - 2 2 6 2 2 6 2 2 6 2 2 6
62978 - 2 2 6 2 2 6 61 42 6 225 175 15
62979 -238 190 10 236 186 11 112 100 78 42 42 42
62980 - 14 14 14 0 0 0 0 0 0 0 0 0
62981 - 0 0 0 0 0 0 0 0 0 0 0 0
62982 - 0 0 0 0 0 0 0 0 0 0 0 0
62983 - 0 0 0 0 0 0 0 0 0 0 0 0
62984 - 0 0 0 0 0 0 0 0 0 6 6 6
62985 - 22 22 22 54 54 54 154 122 46 213 154 11
62986 -226 170 11 230 174 11 226 170 11 226 170 11
62987 -236 178 12 242 186 14 246 190 14 246 190 14
62988 -246 190 14 246 190 14 246 190 14 246 190 14
62989 -241 196 14 184 144 12 10 10 10 2 2 6
62990 - 6 6 6 116 116 116 242 242 242 253 253 253
62991 -253 253 253 253 253 253 253 253 253 253 253 253
62992 -253 253 253 253 253 253 253 253 253 253 253 253
62993 -253 253 253 253 253 253 253 253 253 253 253 253
62994 -253 253 253 253 253 253 253 253 253 253 253 253
62995 -253 253 253 231 231 231 198 198 198 214 170 54
62996 -236 178 12 236 178 12 210 150 10 137 92 6
62997 - 18 14 6 2 2 6 2 2 6 2 2 6
62998 - 6 6 6 70 47 6 200 144 11 236 178 12
62999 -239 182 13 239 182 13 124 112 88 58 58 58
63000 - 22 22 22 6 6 6 0 0 0 0 0 0
63001 - 0 0 0 0 0 0 0 0 0 0 0 0
63002 - 0 0 0 0 0 0 0 0 0 0 0 0
63003 - 0 0 0 0 0 0 0 0 0 0 0 0
63004 - 0 0 0 0 0 0 0 0 0 10 10 10
63005 - 30 30 30 70 70 70 180 133 36 226 170 11
63006 -239 182 13 242 186 14 242 186 14 246 186 14
63007 -246 190 14 246 190 14 246 190 14 246 190 14
63008 -246 190 14 246 190 14 246 190 14 246 190 14
63009 -246 190 14 232 195 16 98 70 6 2 2 6
63010 - 2 2 6 2 2 6 66 66 66 221 221 221
63011 -253 253 253 253 253 253 253 253 253 253 253 253
63012 -253 253 253 253 253 253 253 253 253 253 253 253
63013 -253 253 253 253 253 253 253 253 253 253 253 253
63014 -253 253 253 253 253 253 253 253 253 253 253 253
63015 -253 253 253 206 206 206 198 198 198 214 166 58
63016 -230 174 11 230 174 11 216 158 10 192 133 9
63017 -163 110 8 116 81 8 102 78 10 116 81 8
63018 -167 114 7 197 138 11 226 170 11 239 182 13
63019 -242 186 14 242 186 14 162 146 94 78 78 78
63020 - 34 34 34 14 14 14 6 6 6 0 0 0
63021 - 0 0 0 0 0 0 0 0 0 0 0 0
63022 - 0 0 0 0 0 0 0 0 0 0 0 0
63023 - 0 0 0 0 0 0 0 0 0 0 0 0
63024 - 0 0 0 0 0 0 0 0 0 6 6 6
63025 - 30 30 30 78 78 78 190 142 34 226 170 11
63026 -239 182 13 246 190 14 246 190 14 246 190 14
63027 -246 190 14 246 190 14 246 190 14 246 190 14
63028 -246 190 14 246 190 14 246 190 14 246 190 14
63029 -246 190 14 241 196 14 203 166 17 22 18 6
63030 - 2 2 6 2 2 6 2 2 6 38 38 38
63031 -218 218 218 253 253 253 253 253 253 253 253 253
63032 -253 253 253 253 253 253 253 253 253 253 253 253
63033 -253 253 253 253 253 253 253 253 253 253 253 253
63034 -253 253 253 253 253 253 253 253 253 253 253 253
63035 -250 250 250 206 206 206 198 198 198 202 162 69
63036 -226 170 11 236 178 12 224 166 10 210 150 10
63037 -200 144 11 197 138 11 192 133 9 197 138 11
63038 -210 150 10 226 170 11 242 186 14 246 190 14
63039 -246 190 14 246 186 14 225 175 15 124 112 88
63040 - 62 62 62 30 30 30 14 14 14 6 6 6
63041 - 0 0 0 0 0 0 0 0 0 0 0 0
63042 - 0 0 0 0 0 0 0 0 0 0 0 0
63043 - 0 0 0 0 0 0 0 0 0 0 0 0
63044 - 0 0 0 0 0 0 0 0 0 10 10 10
63045 - 30 30 30 78 78 78 174 135 50 224 166 10
63046 -239 182 13 246 190 14 246 190 14 246 190 14
63047 -246 190 14 246 190 14 246 190 14 246 190 14
63048 -246 190 14 246 190 14 246 190 14 246 190 14
63049 -246 190 14 246 190 14 241 196 14 139 102 15
63050 - 2 2 6 2 2 6 2 2 6 2 2 6
63051 - 78 78 78 250 250 250 253 253 253 253 253 253
63052 -253 253 253 253 253 253 253 253 253 253 253 253
63053 -253 253 253 253 253 253 253 253 253 253 253 253
63054 -253 253 253 253 253 253 253 253 253 253 253 253
63055 -250 250 250 214 214 214 198 198 198 190 150 46
63056 -219 162 10 236 178 12 234 174 13 224 166 10
63057 -216 158 10 213 154 11 213 154 11 216 158 10
63058 -226 170 11 239 182 13 246 190 14 246 190 14
63059 -246 190 14 246 190 14 242 186 14 206 162 42
63060 -101 101 101 58 58 58 30 30 30 14 14 14
63061 - 6 6 6 0 0 0 0 0 0 0 0 0
63062 - 0 0 0 0 0 0 0 0 0 0 0 0
63063 - 0 0 0 0 0 0 0 0 0 0 0 0
63064 - 0 0 0 0 0 0 0 0 0 10 10 10
63065 - 30 30 30 74 74 74 174 135 50 216 158 10
63066 -236 178 12 246 190 14 246 190 14 246 190 14
63067 -246 190 14 246 190 14 246 190 14 246 190 14
63068 -246 190 14 246 190 14 246 190 14 246 190 14
63069 -246 190 14 246 190 14 241 196 14 226 184 13
63070 - 61 42 6 2 2 6 2 2 6 2 2 6
63071 - 22 22 22 238 238 238 253 253 253 253 253 253
63072 -253 253 253 253 253 253 253 253 253 253 253 253
63073 -253 253 253 253 253 253 253 253 253 253 253 253
63074 -253 253 253 253 253 253 253 253 253 253 253 253
63075 -253 253 253 226 226 226 187 187 187 180 133 36
63076 -216 158 10 236 178 12 239 182 13 236 178 12
63077 -230 174 11 226 170 11 226 170 11 230 174 11
63078 -236 178 12 242 186 14 246 190 14 246 190 14
63079 -246 190 14 246 190 14 246 186 14 239 182 13
63080 -206 162 42 106 106 106 66 66 66 34 34 34
63081 - 14 14 14 6 6 6 0 0 0 0 0 0
63082 - 0 0 0 0 0 0 0 0 0 0 0 0
63083 - 0 0 0 0 0 0 0 0 0 0 0 0
63084 - 0 0 0 0 0 0 0 0 0 6 6 6
63085 - 26 26 26 70 70 70 163 133 67 213 154 11
63086 -236 178 12 246 190 14 246 190 14 246 190 14
63087 -246 190 14 246 190 14 246 190 14 246 190 14
63088 -246 190 14 246 190 14 246 190 14 246 190 14
63089 -246 190 14 246 190 14 246 190 14 241 196 14
63090 -190 146 13 18 14 6 2 2 6 2 2 6
63091 - 46 46 46 246 246 246 253 253 253 253 253 253
63092 -253 253 253 253 253 253 253 253 253 253 253 253
63093 -253 253 253 253 253 253 253 253 253 253 253 253
63094 -253 253 253 253 253 253 253 253 253 253 253 253
63095 -253 253 253 221 221 221 86 86 86 156 107 11
63096 -216 158 10 236 178 12 242 186 14 246 186 14
63097 -242 186 14 239 182 13 239 182 13 242 186 14
63098 -242 186 14 246 186 14 246 190 14 246 190 14
63099 -246 190 14 246 190 14 246 190 14 246 190 14
63100 -242 186 14 225 175 15 142 122 72 66 66 66
63101 - 30 30 30 10 10 10 0 0 0 0 0 0
63102 - 0 0 0 0 0 0 0 0 0 0 0 0
63103 - 0 0 0 0 0 0 0 0 0 0 0 0
63104 - 0 0 0 0 0 0 0 0 0 6 6 6
63105 - 26 26 26 70 70 70 163 133 67 210 150 10
63106 -236 178 12 246 190 14 246 190 14 246 190 14
63107 -246 190 14 246 190 14 246 190 14 246 190 14
63108 -246 190 14 246 190 14 246 190 14 246 190 14
63109 -246 190 14 246 190 14 246 190 14 246 190 14
63110 -232 195 16 121 92 8 34 34 34 106 106 106
63111 -221 221 221 253 253 253 253 253 253 253 253 253
63112 -253 253 253 253 253 253 253 253 253 253 253 253
63113 -253 253 253 253 253 253 253 253 253 253 253 253
63114 -253 253 253 253 253 253 253 253 253 253 253 253
63115 -242 242 242 82 82 82 18 14 6 163 110 8
63116 -216 158 10 236 178 12 242 186 14 246 190 14
63117 -246 190 14 246 190 14 246 190 14 246 190 14
63118 -246 190 14 246 190 14 246 190 14 246 190 14
63119 -246 190 14 246 190 14 246 190 14 246 190 14
63120 -246 190 14 246 190 14 242 186 14 163 133 67
63121 - 46 46 46 18 18 18 6 6 6 0 0 0
63122 - 0 0 0 0 0 0 0 0 0 0 0 0
63123 - 0 0 0 0 0 0 0 0 0 0 0 0
63124 - 0 0 0 0 0 0 0 0 0 10 10 10
63125 - 30 30 30 78 78 78 163 133 67 210 150 10
63126 -236 178 12 246 186 14 246 190 14 246 190 14
63127 -246 190 14 246 190 14 246 190 14 246 190 14
63128 -246 190 14 246 190 14 246 190 14 246 190 14
63129 -246 190 14 246 190 14 246 190 14 246 190 14
63130 -241 196 14 215 174 15 190 178 144 253 253 253
63131 -253 253 253 253 253 253 253 253 253 253 253 253
63132 -253 253 253 253 253 253 253 253 253 253 253 253
63133 -253 253 253 253 253 253 253 253 253 253 253 253
63134 -253 253 253 253 253 253 253 253 253 218 218 218
63135 - 58 58 58 2 2 6 22 18 6 167 114 7
63136 -216 158 10 236 178 12 246 186 14 246 190 14
63137 -246 190 14 246 190 14 246 190 14 246 190 14
63138 -246 190 14 246 190 14 246 190 14 246 190 14
63139 -246 190 14 246 190 14 246 190 14 246 190 14
63140 -246 190 14 246 186 14 242 186 14 190 150 46
63141 - 54 54 54 22 22 22 6 6 6 0 0 0
63142 - 0 0 0 0 0 0 0 0 0 0 0 0
63143 - 0 0 0 0 0 0 0 0 0 0 0 0
63144 - 0 0 0 0 0 0 0 0 0 14 14 14
63145 - 38 38 38 86 86 86 180 133 36 213 154 11
63146 -236 178 12 246 186 14 246 190 14 246 190 14
63147 -246 190 14 246 190 14 246 190 14 246 190 14
63148 -246 190 14 246 190 14 246 190 14 246 190 14
63149 -246 190 14 246 190 14 246 190 14 246 190 14
63150 -246 190 14 232 195 16 190 146 13 214 214 214
63151 -253 253 253 253 253 253 253 253 253 253 253 253
63152 -253 253 253 253 253 253 253 253 253 253 253 253
63153 -253 253 253 253 253 253 253 253 253 253 253 253
63154 -253 253 253 250 250 250 170 170 170 26 26 26
63155 - 2 2 6 2 2 6 37 26 9 163 110 8
63156 -219 162 10 239 182 13 246 186 14 246 190 14
63157 -246 190 14 246 190 14 246 190 14 246 190 14
63158 -246 190 14 246 190 14 246 190 14 246 190 14
63159 -246 190 14 246 190 14 246 190 14 246 190 14
63160 -246 186 14 236 178 12 224 166 10 142 122 72
63161 - 46 46 46 18 18 18 6 6 6 0 0 0
63162 - 0 0 0 0 0 0 0 0 0 0 0 0
63163 - 0 0 0 0 0 0 0 0 0 0 0 0
63164 - 0 0 0 0 0 0 6 6 6 18 18 18
63165 - 50 50 50 109 106 95 192 133 9 224 166 10
63166 -242 186 14 246 190 14 246 190 14 246 190 14
63167 -246 190 14 246 190 14 246 190 14 246 190 14
63168 -246 190 14 246 190 14 246 190 14 246 190 14
63169 -246 190 14 246 190 14 246 190 14 246 190 14
63170 -242 186 14 226 184 13 210 162 10 142 110 46
63171 -226 226 226 253 253 253 253 253 253 253 253 253
63172 -253 253 253 253 253 253 253 253 253 253 253 253
63173 -253 253 253 253 253 253 253 253 253 253 253 253
63174 -198 198 198 66 66 66 2 2 6 2 2 6
63175 - 2 2 6 2 2 6 50 34 6 156 107 11
63176 -219 162 10 239 182 13 246 186 14 246 190 14
63177 -246 190 14 246 190 14 246 190 14 246 190 14
63178 -246 190 14 246 190 14 246 190 14 246 190 14
63179 -246 190 14 246 190 14 246 190 14 242 186 14
63180 -234 174 13 213 154 11 154 122 46 66 66 66
63181 - 30 30 30 10 10 10 0 0 0 0 0 0
63182 - 0 0 0 0 0 0 0 0 0 0 0 0
63183 - 0 0 0 0 0 0 0 0 0 0 0 0
63184 - 0 0 0 0 0 0 6 6 6 22 22 22
63185 - 58 58 58 154 121 60 206 145 10 234 174 13
63186 -242 186 14 246 186 14 246 190 14 246 190 14
63187 -246 190 14 246 190 14 246 190 14 246 190 14
63188 -246 190 14 246 190 14 246 190 14 246 190 14
63189 -246 190 14 246 190 14 246 190 14 246 190 14
63190 -246 186 14 236 178 12 210 162 10 163 110 8
63191 - 61 42 6 138 138 138 218 218 218 250 250 250
63192 -253 253 253 253 253 253 253 253 253 250 250 250
63193 -242 242 242 210 210 210 144 144 144 66 66 66
63194 - 6 6 6 2 2 6 2 2 6 2 2 6
63195 - 2 2 6 2 2 6 61 42 6 163 110 8
63196 -216 158 10 236 178 12 246 190 14 246 190 14
63197 -246 190 14 246 190 14 246 190 14 246 190 14
63198 -246 190 14 246 190 14 246 190 14 246 190 14
63199 -246 190 14 239 182 13 230 174 11 216 158 10
63200 -190 142 34 124 112 88 70 70 70 38 38 38
63201 - 18 18 18 6 6 6 0 0 0 0 0 0
63202 - 0 0 0 0 0 0 0 0 0 0 0 0
63203 - 0 0 0 0 0 0 0 0 0 0 0 0
63204 - 0 0 0 0 0 0 6 6 6 22 22 22
63205 - 62 62 62 168 124 44 206 145 10 224 166 10
63206 -236 178 12 239 182 13 242 186 14 242 186 14
63207 -246 186 14 246 190 14 246 190 14 246 190 14
63208 -246 190 14 246 190 14 246 190 14 246 190 14
63209 -246 190 14 246 190 14 246 190 14 246 190 14
63210 -246 190 14 236 178 12 216 158 10 175 118 6
63211 - 80 54 7 2 2 6 6 6 6 30 30 30
63212 - 54 54 54 62 62 62 50 50 50 38 38 38
63213 - 14 14 14 2 2 6 2 2 6 2 2 6
63214 - 2 2 6 2 2 6 2 2 6 2 2 6
63215 - 2 2 6 6 6 6 80 54 7 167 114 7
63216 -213 154 11 236 178 12 246 190 14 246 190 14
63217 -246 190 14 246 190 14 246 190 14 246 190 14
63218 -246 190 14 242 186 14 239 182 13 239 182 13
63219 -230 174 11 210 150 10 174 135 50 124 112 88
63220 - 82 82 82 54 54 54 34 34 34 18 18 18
63221 - 6 6 6 0 0 0 0 0 0 0 0 0
63222 - 0 0 0 0 0 0 0 0 0 0 0 0
63223 - 0 0 0 0 0 0 0 0 0 0 0 0
63224 - 0 0 0 0 0 0 6 6 6 18 18 18
63225 - 50 50 50 158 118 36 192 133 9 200 144 11
63226 -216 158 10 219 162 10 224 166 10 226 170 11
63227 -230 174 11 236 178 12 239 182 13 239 182 13
63228 -242 186 14 246 186 14 246 190 14 246 190 14
63229 -246 190 14 246 190 14 246 190 14 246 190 14
63230 -246 186 14 230 174 11 210 150 10 163 110 8
63231 -104 69 6 10 10 10 2 2 6 2 2 6
63232 - 2 2 6 2 2 6 2 2 6 2 2 6
63233 - 2 2 6 2 2 6 2 2 6 2 2 6
63234 - 2 2 6 2 2 6 2 2 6 2 2 6
63235 - 2 2 6 6 6 6 91 60 6 167 114 7
63236 -206 145 10 230 174 11 242 186 14 246 190 14
63237 -246 190 14 246 190 14 246 186 14 242 186 14
63238 -239 182 13 230 174 11 224 166 10 213 154 11
63239 -180 133 36 124 112 88 86 86 86 58 58 58
63240 - 38 38 38 22 22 22 10 10 10 6 6 6
63241 - 0 0 0 0 0 0 0 0 0 0 0 0
63242 - 0 0 0 0 0 0 0 0 0 0 0 0
63243 - 0 0 0 0 0 0 0 0 0 0 0 0
63244 - 0 0 0 0 0 0 0 0 0 14 14 14
63245 - 34 34 34 70 70 70 138 110 50 158 118 36
63246 -167 114 7 180 123 7 192 133 9 197 138 11
63247 -200 144 11 206 145 10 213 154 11 219 162 10
63248 -224 166 10 230 174 11 239 182 13 242 186 14
63249 -246 186 14 246 186 14 246 186 14 246 186 14
63250 -239 182 13 216 158 10 185 133 11 152 99 6
63251 -104 69 6 18 14 6 2 2 6 2 2 6
63252 - 2 2 6 2 2 6 2 2 6 2 2 6
63253 - 2 2 6 2 2 6 2 2 6 2 2 6
63254 - 2 2 6 2 2 6 2 2 6 2 2 6
63255 - 2 2 6 6 6 6 80 54 7 152 99 6
63256 -192 133 9 219 162 10 236 178 12 239 182 13
63257 -246 186 14 242 186 14 239 182 13 236 178 12
63258 -224 166 10 206 145 10 192 133 9 154 121 60
63259 - 94 94 94 62 62 62 42 42 42 22 22 22
63260 - 14 14 14 6 6 6 0 0 0 0 0 0
63261 - 0 0 0 0 0 0 0 0 0 0 0 0
63262 - 0 0 0 0 0 0 0 0 0 0 0 0
63263 - 0 0 0 0 0 0 0 0 0 0 0 0
63264 - 0 0 0 0 0 0 0 0 0 6 6 6
63265 - 18 18 18 34 34 34 58 58 58 78 78 78
63266 -101 98 89 124 112 88 142 110 46 156 107 11
63267 -163 110 8 167 114 7 175 118 6 180 123 7
63268 -185 133 11 197 138 11 210 150 10 219 162 10
63269 -226 170 11 236 178 12 236 178 12 234 174 13
63270 -219 162 10 197 138 11 163 110 8 130 83 6
63271 - 91 60 6 10 10 10 2 2 6 2 2 6
63272 - 18 18 18 38 38 38 38 38 38 38 38 38
63273 - 38 38 38 38 38 38 38 38 38 38 38 38
63274 - 38 38 38 38 38 38 26 26 26 2 2 6
63275 - 2 2 6 6 6 6 70 47 6 137 92 6
63276 -175 118 6 200 144 11 219 162 10 230 174 11
63277 -234 174 13 230 174 11 219 162 10 210 150 10
63278 -192 133 9 163 110 8 124 112 88 82 82 82
63279 - 50 50 50 30 30 30 14 14 14 6 6 6
63280 - 0 0 0 0 0 0 0 0 0 0 0 0
63281 - 0 0 0 0 0 0 0 0 0 0 0 0
63282 - 0 0 0 0 0 0 0 0 0 0 0 0
63283 - 0 0 0 0 0 0 0 0 0 0 0 0
63284 - 0 0 0 0 0 0 0 0 0 0 0 0
63285 - 6 6 6 14 14 14 22 22 22 34 34 34
63286 - 42 42 42 58 58 58 74 74 74 86 86 86
63287 -101 98 89 122 102 70 130 98 46 121 87 25
63288 -137 92 6 152 99 6 163 110 8 180 123 7
63289 -185 133 11 197 138 11 206 145 10 200 144 11
63290 -180 123 7 156 107 11 130 83 6 104 69 6
63291 - 50 34 6 54 54 54 110 110 110 101 98 89
63292 - 86 86 86 82 82 82 78 78 78 78 78 78
63293 - 78 78 78 78 78 78 78 78 78 78 78 78
63294 - 78 78 78 82 82 82 86 86 86 94 94 94
63295 -106 106 106 101 101 101 86 66 34 124 80 6
63296 -156 107 11 180 123 7 192 133 9 200 144 11
63297 -206 145 10 200 144 11 192 133 9 175 118 6
63298 -139 102 15 109 106 95 70 70 70 42 42 42
63299 - 22 22 22 10 10 10 0 0 0 0 0 0
63300 - 0 0 0 0 0 0 0 0 0 0 0 0
63301 - 0 0 0 0 0 0 0 0 0 0 0 0
63302 - 0 0 0 0 0 0 0 0 0 0 0 0
63303 - 0 0 0 0 0 0 0 0 0 0 0 0
63304 - 0 0 0 0 0 0 0 0 0 0 0 0
63305 - 0 0 0 0 0 0 6 6 6 10 10 10
63306 - 14 14 14 22 22 22 30 30 30 38 38 38
63307 - 50 50 50 62 62 62 74 74 74 90 90 90
63308 -101 98 89 112 100 78 121 87 25 124 80 6
63309 -137 92 6 152 99 6 152 99 6 152 99 6
63310 -138 86 6 124 80 6 98 70 6 86 66 30
63311 -101 98 89 82 82 82 58 58 58 46 46 46
63312 - 38 38 38 34 34 34 34 34 34 34 34 34
63313 - 34 34 34 34 34 34 34 34 34 34 34 34
63314 - 34 34 34 34 34 34 38 38 38 42 42 42
63315 - 54 54 54 82 82 82 94 86 76 91 60 6
63316 -134 86 6 156 107 11 167 114 7 175 118 6
63317 -175 118 6 167 114 7 152 99 6 121 87 25
63318 -101 98 89 62 62 62 34 34 34 18 18 18
63319 - 6 6 6 0 0 0 0 0 0 0 0 0
63320 - 0 0 0 0 0 0 0 0 0 0 0 0
63321 - 0 0 0 0 0 0 0 0 0 0 0 0
63322 - 0 0 0 0 0 0 0 0 0 0 0 0
63323 - 0 0 0 0 0 0 0 0 0 0 0 0
63324 - 0 0 0 0 0 0 0 0 0 0 0 0
63325 - 0 0 0 0 0 0 0 0 0 0 0 0
63326 - 0 0 0 6 6 6 6 6 6 10 10 10
63327 - 18 18 18 22 22 22 30 30 30 42 42 42
63328 - 50 50 50 66 66 66 86 86 86 101 98 89
63329 -106 86 58 98 70 6 104 69 6 104 69 6
63330 -104 69 6 91 60 6 82 62 34 90 90 90
63331 - 62 62 62 38 38 38 22 22 22 14 14 14
63332 - 10 10 10 10 10 10 10 10 10 10 10 10
63333 - 10 10 10 10 10 10 6 6 6 10 10 10
63334 - 10 10 10 10 10 10 10 10 10 14 14 14
63335 - 22 22 22 42 42 42 70 70 70 89 81 66
63336 - 80 54 7 104 69 6 124 80 6 137 92 6
63337 -134 86 6 116 81 8 100 82 52 86 86 86
63338 - 58 58 58 30 30 30 14 14 14 6 6 6
63339 - 0 0 0 0 0 0 0 0 0 0 0 0
63340 - 0 0 0 0 0 0 0 0 0 0 0 0
63341 - 0 0 0 0 0 0 0 0 0 0 0 0
63342 - 0 0 0 0 0 0 0 0 0 0 0 0
63343 - 0 0 0 0 0 0 0 0 0 0 0 0
63344 - 0 0 0 0 0 0 0 0 0 0 0 0
63345 - 0 0 0 0 0 0 0 0 0 0 0 0
63346 - 0 0 0 0 0 0 0 0 0 0 0 0
63347 - 0 0 0 6 6 6 10 10 10 14 14 14
63348 - 18 18 18 26 26 26 38 38 38 54 54 54
63349 - 70 70 70 86 86 86 94 86 76 89 81 66
63350 - 89 81 66 86 86 86 74 74 74 50 50 50
63351 - 30 30 30 14 14 14 6 6 6 0 0 0
63352 - 0 0 0 0 0 0 0 0 0 0 0 0
63353 - 0 0 0 0 0 0 0 0 0 0 0 0
63354 - 0 0 0 0 0 0 0 0 0 0 0 0
63355 - 6 6 6 18 18 18 34 34 34 58 58 58
63356 - 82 82 82 89 81 66 89 81 66 89 81 66
63357 - 94 86 66 94 86 76 74 74 74 50 50 50
63358 - 26 26 26 14 14 14 6 6 6 0 0 0
63359 - 0 0 0 0 0 0 0 0 0 0 0 0
63360 - 0 0 0 0 0 0 0 0 0 0 0 0
63361 - 0 0 0 0 0 0 0 0 0 0 0 0
63362 - 0 0 0 0 0 0 0 0 0 0 0 0
63363 - 0 0 0 0 0 0 0 0 0 0 0 0
63364 - 0 0 0 0 0 0 0 0 0 0 0 0
63365 - 0 0 0 0 0 0 0 0 0 0 0 0
63366 - 0 0 0 0 0 0 0 0 0 0 0 0
63367 - 0 0 0 0 0 0 0 0 0 0 0 0
63368 - 6 6 6 6 6 6 14 14 14 18 18 18
63369 - 30 30 30 38 38 38 46 46 46 54 54 54
63370 - 50 50 50 42 42 42 30 30 30 18 18 18
63371 - 10 10 10 0 0 0 0 0 0 0 0 0
63372 - 0 0 0 0 0 0 0 0 0 0 0 0
63373 - 0 0 0 0 0 0 0 0 0 0 0 0
63374 - 0 0 0 0 0 0 0 0 0 0 0 0
63375 - 0 0 0 6 6 6 14 14 14 26 26 26
63376 - 38 38 38 50 50 50 58 58 58 58 58 58
63377 - 54 54 54 42 42 42 30 30 30 18 18 18
63378 - 10 10 10 0 0 0 0 0 0 0 0 0
63379 - 0 0 0 0 0 0 0 0 0 0 0 0
63380 - 0 0 0 0 0 0 0 0 0 0 0 0
63381 - 0 0 0 0 0 0 0 0 0 0 0 0
63382 - 0 0 0 0 0 0 0 0 0 0 0 0
63383 - 0 0 0 0 0 0 0 0 0 0 0 0
63384 - 0 0 0 0 0 0 0 0 0 0 0 0
63385 - 0 0 0 0 0 0 0 0 0 0 0 0
63386 - 0 0 0 0 0 0 0 0 0 0 0 0
63387 - 0 0 0 0 0 0 0 0 0 0 0 0
63388 - 0 0 0 0 0 0 0 0 0 6 6 6
63389 - 6 6 6 10 10 10 14 14 14 18 18 18
63390 - 18 18 18 14 14 14 10 10 10 6 6 6
63391 - 0 0 0 0 0 0 0 0 0 0 0 0
63392 - 0 0 0 0 0 0 0 0 0 0 0 0
63393 - 0 0 0 0 0 0 0 0 0 0 0 0
63394 - 0 0 0 0 0 0 0 0 0 0 0 0
63395 - 0 0 0 0 0 0 0 0 0 6 6 6
63396 - 14 14 14 18 18 18 22 22 22 22 22 22
63397 - 18 18 18 14 14 14 10 10 10 6 6 6
63398 - 0 0 0 0 0 0 0 0 0 0 0 0
63399 - 0 0 0 0 0 0 0 0 0 0 0 0
63400 - 0 0 0 0 0 0 0 0 0 0 0 0
63401 - 0 0 0 0 0 0 0 0 0 0 0 0
63402 - 0 0 0 0 0 0 0 0 0 0 0 0
63403 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63404 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63405 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63406 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63407 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63408 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63409 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63410 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63411 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63412 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63413 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63414 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63415 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63416 +4 4 4 4 4 4
63417 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63418 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63419 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63420 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63421 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63422 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63423 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63424 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63425 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63426 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63427 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63428 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63429 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63430 +4 4 4 4 4 4
63431 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63432 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63433 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63434 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63435 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63436 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63437 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63438 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63439 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63440 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63441 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63442 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63443 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63444 +4 4 4 4 4 4
63445 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63446 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63447 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63448 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63449 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63450 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63451 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63452 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63453 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63454 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63455 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63456 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63457 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63458 +4 4 4 4 4 4
63459 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63460 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63461 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63462 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63463 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63464 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63465 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63466 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63467 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63468 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63469 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63470 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63471 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63472 +4 4 4 4 4 4
63473 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63474 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63475 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63476 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63477 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63478 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63479 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63480 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63481 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63482 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63483 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63484 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63485 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63486 +4 4 4 4 4 4
63487 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63488 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63489 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63490 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63491 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
63492 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
63493 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63494 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63495 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63496 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
63497 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
63498 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
63499 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63500 +4 4 4 4 4 4
63501 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63502 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63503 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63504 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63505 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
63506 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
63507 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63508 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63509 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63510 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
63511 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
63512 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
63513 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63514 +4 4 4 4 4 4
63515 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63516 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63517 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63518 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63519 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
63520 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
63521 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
63522 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63523 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63524 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
63525 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
63526 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
63527 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
63528 +4 4 4 4 4 4
63529 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63530 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63531 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63532 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
63533 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
63534 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
63535 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
63536 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63537 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
63538 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
63539 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
63540 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
63541 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
63542 +4 4 4 4 4 4
63543 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63544 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63545 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63546 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
63547 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
63548 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
63549 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
63550 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
63551 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
63552 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
63553 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
63554 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
63555 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
63556 +4 4 4 4 4 4
63557 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63558 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63559 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
63560 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
63561 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
63562 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
63563 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
63564 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
63565 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
63566 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
63567 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
63568 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
63569 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
63570 +4 4 4 4 4 4
63571 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63572 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63573 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
63574 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
63575 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
63576 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
63577 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
63578 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
63579 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
63580 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
63581 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
63582 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
63583 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
63584 +4 4 4 4 4 4
63585 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63586 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63587 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
63588 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
63589 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
63590 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
63591 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
63592 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
63593 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
63594 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
63595 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
63596 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
63597 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
63598 +4 4 4 4 4 4
63599 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63600 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63601 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
63602 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
63603 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
63604 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
63605 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
63606 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
63607 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
63608 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
63609 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
63610 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
63611 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
63612 +4 4 4 4 4 4
63613 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63614 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63615 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
63616 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
63617 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
63618 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
63619 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
63620 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
63621 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
63622 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
63623 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
63624 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
63625 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
63626 +4 4 4 4 4 4
63627 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63628 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
63629 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
63630 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
63631 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
63632 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
63633 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
63634 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
63635 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
63636 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
63637 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
63638 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
63639 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
63640 +4 4 4 4 4 4
63641 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63642 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
63643 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
63644 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
63645 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
63646 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
63647 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
63648 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
63649 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
63650 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
63651 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
63652 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
63653 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
63654 +0 0 0 4 4 4
63655 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
63656 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
63657 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
63658 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
63659 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
63660 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
63661 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
63662 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
63663 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
63664 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
63665 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
63666 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
63667 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
63668 +2 0 0 0 0 0
63669 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
63670 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
63671 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
63672 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
63673 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
63674 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
63675 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
63676 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
63677 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
63678 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
63679 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
63680 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
63681 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
63682 +37 38 37 0 0 0
63683 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
63684 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
63685 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
63686 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
63687 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
63688 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
63689 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
63690 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
63691 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
63692 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
63693 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
63694 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
63695 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
63696 +85 115 134 4 0 0
63697 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
63698 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
63699 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
63700 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
63701 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
63702 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
63703 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
63704 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
63705 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
63706 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
63707 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
63708 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
63709 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
63710 +60 73 81 4 0 0
63711 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
63712 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
63713 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
63714 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
63715 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
63716 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
63717 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
63718 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
63719 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
63720 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
63721 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
63722 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
63723 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
63724 +16 19 21 4 0 0
63725 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
63726 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
63727 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
63728 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
63729 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
63730 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
63731 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
63732 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
63733 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
63734 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
63735 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
63736 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
63737 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
63738 +4 0 0 4 3 3
63739 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
63740 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
63741 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
63742 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
63743 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
63744 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
63745 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
63746 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
63747 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
63748 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
63749 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
63750 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
63751 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
63752 +3 2 2 4 4 4
63753 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
63754 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
63755 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
63756 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
63757 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
63758 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
63759 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
63760 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
63761 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
63762 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
63763 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
63764 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
63765 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
63766 +4 4 4 4 4 4
63767 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
63768 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
63769 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
63770 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
63771 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
63772 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
63773 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
63774 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
63775 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
63776 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
63777 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
63778 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
63779 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
63780 +4 4 4 4 4 4
63781 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
63782 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
63783 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
63784 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
63785 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
63786 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
63787 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
63788 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
63789 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
63790 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
63791 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
63792 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
63793 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
63794 +5 5 5 5 5 5
63795 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
63796 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
63797 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
63798 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
63799 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
63800 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63801 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
63802 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
63803 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
63804 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
63805 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
63806 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
63807 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
63808 +5 5 5 4 4 4
63809 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
63810 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
63811 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
63812 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
63813 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
63814 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
63815 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
63816 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
63817 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
63818 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
63819 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
63820 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
63821 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63822 +4 4 4 4 4 4
63823 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
63824 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
63825 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
63826 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
63827 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
63828 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63829 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63830 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
63831 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
63832 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
63833 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
63834 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
63835 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63836 +4 4 4 4 4 4
63837 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
63838 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
63839 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
63840 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
63841 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
63842 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
63843 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
63844 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
63845 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
63846 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
63847 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
63848 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63849 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63850 +4 4 4 4 4 4
63851 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
63852 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
63853 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
63854 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
63855 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
63856 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63857 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63858 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
63859 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
63860 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
63861 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
63862 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63863 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63864 +4 4 4 4 4 4
63865 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
63866 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
63867 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
63868 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
63869 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
63870 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
63871 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
63872 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
63873 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
63874 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
63875 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63876 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63877 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63878 +4 4 4 4 4 4
63879 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
63880 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
63881 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
63882 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
63883 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
63884 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
63885 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
63886 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
63887 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
63888 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
63889 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
63890 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63892 +4 4 4 4 4 4
63893 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
63894 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
63895 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
63896 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
63897 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
63898 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
63899 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
63900 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
63901 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
63902 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
63903 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
63904 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63906 +4 4 4 4 4 4
63907 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
63908 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
63909 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
63910 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
63911 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
63912 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
63913 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
63914 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
63915 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
63916 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
63917 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63918 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63920 +4 4 4 4 4 4
63921 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
63922 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
63923 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
63924 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
63925 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63926 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
63927 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
63928 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
63929 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
63930 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
63931 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63932 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63934 +4 4 4 4 4 4
63935 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
63936 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
63937 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
63938 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
63939 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63940 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
63941 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
63942 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
63943 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
63944 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
63945 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63946 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63948 +4 4 4 4 4 4
63949 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
63950 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
63951 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
63952 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
63953 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63954 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
63955 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
63956 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
63957 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
63958 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63959 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63960 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63961 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63962 +4 4 4 4 4 4
63963 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
63964 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
63965 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
63966 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
63967 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
63968 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
63969 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
63970 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
63971 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
63972 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63973 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63974 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63976 +4 4 4 4 4 4
63977 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
63978 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
63979 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
63980 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
63981 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63982 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
63983 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
63984 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
63985 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
63986 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63987 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63988 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63989 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63990 +4 4 4 4 4 4
63991 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
63992 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
63993 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
63994 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
63995 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
63996 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
63997 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
63998 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
63999 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
64000 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64001 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64002 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64003 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64004 +4 4 4 4 4 4
64005 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
64006 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
64007 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
64008 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
64009 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
64010 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
64011 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
64012 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
64013 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
64014 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64015 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64016 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64018 +4 4 4 4 4 4
64019 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
64020 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
64021 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
64022 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
64023 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
64024 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
64025 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
64026 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
64027 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
64028 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64029 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64030 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64031 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64032 +4 4 4 4 4 4
64033 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
64034 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
64035 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
64036 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
64037 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
64038 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
64039 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
64040 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
64041 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
64042 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64043 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64044 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64046 +4 4 4 4 4 4
64047 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
64048 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
64049 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
64050 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
64051 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
64052 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
64053 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
64054 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
64055 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
64056 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64057 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64060 +4 4 4 4 4 4
64061 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
64062 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
64063 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
64064 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
64065 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
64066 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
64067 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
64068 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
64069 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
64070 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64071 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64074 +4 4 4 4 4 4
64075 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
64076 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
64077 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
64078 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
64079 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
64080 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
64081 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
64082 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
64083 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
64084 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64085 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64088 +4 4 4 4 4 4
64089 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
64090 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
64091 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
64092 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
64093 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
64094 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
64095 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
64096 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
64097 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
64098 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64099 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64100 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64101 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64102 +4 4 4 4 4 4
64103 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
64104 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
64105 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
64106 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
64107 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
64108 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
64109 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
64110 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
64111 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
64112 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64113 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64114 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64115 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64116 +4 4 4 4 4 4
64117 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
64118 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
64119 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
64120 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
64121 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
64122 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
64123 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
64124 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
64125 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
64126 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64127 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64128 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64129 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64130 +4 4 4 4 4 4
64131 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
64132 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
64133 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
64134 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
64135 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
64136 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
64137 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
64138 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
64139 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
64140 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64141 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64142 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64143 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64144 +4 4 4 4 4 4
64145 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
64146 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
64147 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
64148 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
64149 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
64150 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
64151 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
64152 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
64153 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
64154 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64155 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64156 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64157 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64158 +4 4 4 4 4 4
64159 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
64160 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
64161 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
64162 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
64163 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
64164 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
64165 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
64166 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
64167 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
64168 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
64169 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64170 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64171 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64172 +4 4 4 4 4 4
64173 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
64174 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
64175 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
64176 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
64177 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
64178 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
64179 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
64180 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
64181 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
64182 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
64183 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64184 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64185 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64186 +4 4 4 4 4 4
64187 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
64188 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
64189 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
64190 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
64191 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
64192 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
64193 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64194 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
64195 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
64196 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
64197 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
64198 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64199 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64200 +4 4 4 4 4 4
64201 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
64202 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
64203 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
64204 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
64205 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
64206 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
64207 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
64208 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
64209 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
64210 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
64211 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64212 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64213 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64214 +4 4 4 4 4 4
64215 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
64216 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
64217 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
64218 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
64219 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
64220 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
64221 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
64222 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
64223 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
64224 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
64225 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64226 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64227 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64228 +4 4 4 4 4 4
64229 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
64230 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
64231 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
64232 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
64233 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
64234 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
64235 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
64236 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
64237 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
64238 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
64239 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64240 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64241 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64242 +4 4 4 4 4 4
64243 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
64244 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
64245 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
64246 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
64247 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
64248 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
64249 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
64250 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
64251 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
64252 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
64253 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64254 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64256 +4 4 4 4 4 4
64257 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
64258 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
64259 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
64260 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
64261 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
64262 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
64263 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
64264 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
64265 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
64266 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
64267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64270 +4 4 4 4 4 4
64271 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
64272 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
64273 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
64274 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
64275 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
64276 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
64277 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
64278 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
64279 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
64280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64284 +4 4 4 4 4 4
64285 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
64286 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
64287 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
64288 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
64289 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
64290 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
64291 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
64292 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
64293 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
64294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64298 +4 4 4 4 4 4
64299 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
64300 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
64301 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
64302 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
64303 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
64304 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
64305 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
64306 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
64307 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64312 +4 4 4 4 4 4
64313 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
64314 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
64315 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
64316 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
64317 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
64318 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
64319 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
64320 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
64321 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64326 +4 4 4 4 4 4
64327 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
64328 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
64329 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
64330 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
64331 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
64332 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
64333 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
64334 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
64335 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64340 +4 4 4 4 4 4
64341 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
64342 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
64343 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
64344 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
64345 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
64346 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
64347 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
64348 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
64349 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64354 +4 4 4 4 4 4
64355 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64356 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
64357 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
64358 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
64359 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
64360 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
64361 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
64362 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
64363 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64368 +4 4 4 4 4 4
64369 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64370 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
64371 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
64372 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
64373 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
64374 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
64375 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
64376 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
64377 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64378 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64379 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64380 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64381 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64382 +4 4 4 4 4 4
64383 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64384 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64385 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
64386 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
64387 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
64388 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
64389 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
64390 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
64391 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64392 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64393 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64394 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64395 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64396 +4 4 4 4 4 4
64397 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64398 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64399 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
64400 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
64401 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
64402 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
64403 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
64404 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64405 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64406 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64407 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64408 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64409 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64410 +4 4 4 4 4 4
64411 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64412 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64413 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64414 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
64415 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
64416 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
64417 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
64418 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64419 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64420 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64421 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64422 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64423 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64424 +4 4 4 4 4 4
64425 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64426 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64427 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64428 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
64429 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
64430 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
64431 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
64432 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64433 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64434 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64435 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64436 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64437 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64438 +4 4 4 4 4 4
64439 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64440 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64441 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64442 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
64443 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
64444 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
64445 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
64446 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64447 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64448 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64449 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64450 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64451 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64452 +4 4 4 4 4 4
64453 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64454 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64455 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64456 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
64457 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
64458 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
64459 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64460 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64461 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64462 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64463 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64464 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64465 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64466 +4 4 4 4 4 4
64467 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64468 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64469 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64470 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64471 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
64472 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
64473 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
64474 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64475 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64476 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64477 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64478 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64479 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64480 +4 4 4 4 4 4
64481 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64482 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64483 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64484 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64485 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
64486 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
64487 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64488 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64489 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64490 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64491 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64492 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64493 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64494 +4 4 4 4 4 4
64495 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64496 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64497 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64498 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64499 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
64500 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
64501 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64502 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64503 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64504 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64505 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64506 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64507 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64508 +4 4 4 4 4 4
64509 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64510 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64511 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64512 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64513 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
64514 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
64515 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64516 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64517 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64518 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64519 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64520 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64521 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64522 +4 4 4 4 4 4
64523 diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
64524 index 443e3c8..c443d6a 100644
64525 --- a/drivers/video/nvidia/nv_backlight.c
64526 +++ b/drivers/video/nvidia/nv_backlight.c
64527 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
64528 return bd->props.brightness;
64529 }
64530
64531 -static struct backlight_ops nvidia_bl_ops = {
64532 +static const struct backlight_ops nvidia_bl_ops = {
64533 .get_brightness = nvidia_bl_get_brightness,
64534 .update_status = nvidia_bl_update_status,
64535 };
64536 diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
64537 index d94c57f..912984c 100644
64538 --- a/drivers/video/riva/fbdev.c
64539 +++ b/drivers/video/riva/fbdev.c
64540 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
64541 return bd->props.brightness;
64542 }
64543
64544 -static struct backlight_ops riva_bl_ops = {
64545 +static const struct backlight_ops riva_bl_ops = {
64546 .get_brightness = riva_bl_get_brightness,
64547 .update_status = riva_bl_update_status,
64548 };
64549 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
64550 index 54fbb29..2c108fc 100644
64551 --- a/drivers/video/uvesafb.c
64552 +++ b/drivers/video/uvesafb.c
64553 @@ -18,6 +18,7 @@
64554 #include <linux/fb.h>
64555 #include <linux/io.h>
64556 #include <linux/mutex.h>
64557 +#include <linux/moduleloader.h>
64558 #include <video/edid.h>
64559 #include <video/uvesafb.h>
64560 #ifdef CONFIG_X86
64561 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
64562 NULL,
64563 };
64564
64565 - return call_usermodehelper(v86d_path, argv, envp, 1);
64566 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
64567 }
64568
64569 /*
64570 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
64571 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
64572 par->pmi_setpal = par->ypan = 0;
64573 } else {
64574 +
64575 +#ifdef CONFIG_PAX_KERNEXEC
64576 +#ifdef CONFIG_MODULES
64577 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
64578 +#endif
64579 + if (!par->pmi_code) {
64580 + par->pmi_setpal = par->ypan = 0;
64581 + return 0;
64582 + }
64583 +#endif
64584 +
64585 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
64586 + task->t.regs.edi);
64587 +
64588 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64589 + pax_open_kernel();
64590 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
64591 + pax_close_kernel();
64592 +
64593 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
64594 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
64595 +#else
64596 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
64597 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
64598 +#endif
64599 +
64600 printk(KERN_INFO "uvesafb: protected mode interface info at "
64601 "%04x:%04x\n",
64602 (u16)task->t.regs.es, (u16)task->t.regs.edi);
64603 @@ -1799,6 +1822,11 @@ out:
64604 if (par->vbe_modes)
64605 kfree(par->vbe_modes);
64606
64607 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64608 + if (par->pmi_code)
64609 + module_free_exec(NULL, par->pmi_code);
64610 +#endif
64611 +
64612 framebuffer_release(info);
64613 return err;
64614 }
64615 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
64616 kfree(par->vbe_state_orig);
64617 if (par->vbe_state_saved)
64618 kfree(par->vbe_state_saved);
64619 +
64620 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64621 + if (par->pmi_code)
64622 + module_free_exec(NULL, par->pmi_code);
64623 +#endif
64624 +
64625 }
64626
64627 framebuffer_release(info);
64628 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
64629 index bd37ee1..cb827e8 100644
64630 --- a/drivers/video/vesafb.c
64631 +++ b/drivers/video/vesafb.c
64632 @@ -9,6 +9,7 @@
64633 */
64634
64635 #include <linux/module.h>
64636 +#include <linux/moduleloader.h>
64637 #include <linux/kernel.h>
64638 #include <linux/errno.h>
64639 #include <linux/string.h>
64640 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
64641 static int vram_total __initdata; /* Set total amount of memory */
64642 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
64643 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
64644 -static void (*pmi_start)(void) __read_mostly;
64645 -static void (*pmi_pal) (void) __read_mostly;
64646 +static void (*pmi_start)(void) __read_only;
64647 +static void (*pmi_pal) (void) __read_only;
64648 static int depth __read_mostly;
64649 static int vga_compat __read_mostly;
64650 /* --------------------------------------------------------------------- */
64651 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
64652 unsigned int size_vmode;
64653 unsigned int size_remap;
64654 unsigned int size_total;
64655 + void *pmi_code = NULL;
64656
64657 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
64658 return -ENODEV;
64659 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
64660 size_remap = size_total;
64661 vesafb_fix.smem_len = size_remap;
64662
64663 -#ifndef __i386__
64664 - screen_info.vesapm_seg = 0;
64665 -#endif
64666 -
64667 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
64668 printk(KERN_WARNING
64669 "vesafb: cannot reserve video memory at 0x%lx\n",
64670 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
64671 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
64672 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
64673
64674 +#ifdef __i386__
64675 +
64676 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64677 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
64678 + if (!pmi_code)
64679 +#elif !defined(CONFIG_PAX_KERNEXEC)
64680 + if (0)
64681 +#endif
64682 +
64683 +#endif
64684 + screen_info.vesapm_seg = 0;
64685 +
64686 if (screen_info.vesapm_seg) {
64687 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
64688 - screen_info.vesapm_seg,screen_info.vesapm_off);
64689 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
64690 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
64691 }
64692
64693 if (screen_info.vesapm_seg < 0xc000)
64694 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
64695
64696 if (ypan || pmi_setpal) {
64697 unsigned short *pmi_base;
64698 +
64699 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
64700 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
64701 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
64702 +
64703 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64704 + pax_open_kernel();
64705 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
64706 +#else
64707 + pmi_code = pmi_base;
64708 +#endif
64709 +
64710 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
64711 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
64712 +
64713 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64714 + pmi_start = ktva_ktla(pmi_start);
64715 + pmi_pal = ktva_ktla(pmi_pal);
64716 + pax_close_kernel();
64717 +#endif
64718 +
64719 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
64720 if (pmi_base[3]) {
64721 printk(KERN_INFO "vesafb: pmi: ports = ");
64722 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
64723 info->node, info->fix.id);
64724 return 0;
64725 err:
64726 +
64727 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64728 + module_free_exec(NULL, pmi_code);
64729 +#endif
64730 +
64731 if (info->screen_base)
64732 iounmap(info->screen_base);
64733 framebuffer_release(info);
64734 diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
64735 index 88a60e0..6783cc2 100644
64736 --- a/drivers/xen/sys-hypervisor.c
64737 +++ b/drivers/xen/sys-hypervisor.c
64738 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
64739 return 0;
64740 }
64741
64742 -static struct sysfs_ops hyp_sysfs_ops = {
64743 +static const struct sysfs_ops hyp_sysfs_ops = {
64744 .show = hyp_sysfs_show,
64745 .store = hyp_sysfs_store,
64746 };
64747 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
64748 index 18f74ec..3227009 100644
64749 --- a/fs/9p/vfs_inode.c
64750 +++ b/fs/9p/vfs_inode.c
64751 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
64752 static void
64753 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
64754 {
64755 - char *s = nd_get_link(nd);
64756 + const char *s = nd_get_link(nd);
64757
64758 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
64759 IS_ERR(s) ? "<error>" : s);
64760 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
64761 index bb4cc5b..df5eaa0 100644
64762 --- a/fs/Kconfig.binfmt
64763 +++ b/fs/Kconfig.binfmt
64764 @@ -86,7 +86,7 @@ config HAVE_AOUT
64765
64766 config BINFMT_AOUT
64767 tristate "Kernel support for a.out and ECOFF binaries"
64768 - depends on HAVE_AOUT
64769 + depends on HAVE_AOUT && BROKEN
64770 ---help---
64771 A.out (Assembler.OUTput) is a set of formats for libraries and
64772 executables used in the earliest versions of UNIX. Linux used
64773 diff --git a/fs/aio.c b/fs/aio.c
64774 index 22a19ad..d484e5b 100644
64775 --- a/fs/aio.c
64776 +++ b/fs/aio.c
64777 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
64778 size += sizeof(struct io_event) * nr_events;
64779 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
64780
64781 - if (nr_pages < 0)
64782 + if (nr_pages <= 0)
64783 return -EINVAL;
64784
64785 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
64786 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
64787 struct aio_timeout to;
64788 int retry = 0;
64789
64790 + pax_track_stack();
64791 +
64792 /* needed to zero any padding within an entry (there shouldn't be
64793 * any, but C is fun!
64794 */
64795 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
64796 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
64797 {
64798 ssize_t ret;
64799 + struct iovec iovstack;
64800
64801 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
64802 kiocb->ki_nbytes, 1,
64803 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
64804 + &iovstack, &kiocb->ki_iovec);
64805 if (ret < 0)
64806 goto out;
64807
64808 + if (kiocb->ki_iovec == &iovstack) {
64809 + kiocb->ki_inline_vec = iovstack;
64810 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
64811 + }
64812 kiocb->ki_nr_segs = kiocb->ki_nbytes;
64813 kiocb->ki_cur_seg = 0;
64814 /* ki_nbytes/left now reflect bytes instead of segs */
64815 diff --git a/fs/attr.c b/fs/attr.c
64816 index 96d394b..33cf5b4 100644
64817 --- a/fs/attr.c
64818 +++ b/fs/attr.c
64819 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
64820 unsigned long limit;
64821
64822 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64823 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
64824 if (limit != RLIM_INFINITY && offset > limit)
64825 goto out_sig;
64826 if (offset > inode->i_sb->s_maxbytes)
64827 diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
64828 index b4ea829..e63ef18 100644
64829 --- a/fs/autofs4/symlink.c
64830 +++ b/fs/autofs4/symlink.c
64831 @@ -15,7 +15,7 @@
64832 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
64833 {
64834 struct autofs_info *ino = autofs4_dentry_ino(dentry);
64835 - nd_set_link(nd, (char *)ino->u.symlink);
64836 + nd_set_link(nd, ino->u.symlink);
64837 return NULL;
64838 }
64839
64840 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
64841 index 136a0d6..a287331 100644
64842 --- a/fs/autofs4/waitq.c
64843 +++ b/fs/autofs4/waitq.c
64844 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
64845 {
64846 unsigned long sigpipe, flags;
64847 mm_segment_t fs;
64848 - const char *data = (const char *)addr;
64849 + const char __user *data = (const char __force_user *)addr;
64850 ssize_t wr = 0;
64851
64852 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
64853 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
64854 index 9158c07..3f06659 100644
64855 --- a/fs/befs/linuxvfs.c
64856 +++ b/fs/befs/linuxvfs.c
64857 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
64858 {
64859 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
64860 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
64861 - char *link = nd_get_link(nd);
64862 + const char *link = nd_get_link(nd);
64863 if (!IS_ERR(link))
64864 kfree(link);
64865 }
64866 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
64867 index 0133b5a..3710d09 100644
64868 --- a/fs/binfmt_aout.c
64869 +++ b/fs/binfmt_aout.c
64870 @@ -16,6 +16,7 @@
64871 #include <linux/string.h>
64872 #include <linux/fs.h>
64873 #include <linux/file.h>
64874 +#include <linux/security.h>
64875 #include <linux/stat.h>
64876 #include <linux/fcntl.h>
64877 #include <linux/ptrace.h>
64878 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
64879 #endif
64880 # define START_STACK(u) (u.start_stack)
64881
64882 + memset(&dump, 0, sizeof(dump));
64883 +
64884 fs = get_fs();
64885 set_fs(KERNEL_DS);
64886 has_dumped = 1;
64887 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
64888
64889 /* If the size of the dump file exceeds the rlimit, then see what would happen
64890 if we wrote the stack, but not the data area. */
64891 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
64892 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
64893 dump.u_dsize = 0;
64894
64895 /* Make sure we have enough room to write the stack and data areas. */
64896 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
64897 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
64898 dump.u_ssize = 0;
64899
64900 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
64901 dump_size = dump.u_ssize << PAGE_SHIFT;
64902 DUMP_WRITE(dump_start,dump_size);
64903 }
64904 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
64905 - set_fs(KERNEL_DS);
64906 - DUMP_WRITE(current,sizeof(*current));
64907 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
64908 end_coredump:
64909 set_fs(fs);
64910 return has_dumped;
64911 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
64912 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
64913 if (rlim >= RLIM_INFINITY)
64914 rlim = ~0;
64915 +
64916 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
64917 if (ex.a_data + ex.a_bss > rlim)
64918 return -ENOMEM;
64919
64920 @@ -274,9 +279,37 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
64921 current->mm->free_area_cache = current->mm->mmap_base;
64922 current->mm->cached_hole_size = 0;
64923
64924 + retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
64925 + if (retval < 0) {
64926 + /* Someone check-me: is this error path enough? */
64927 + send_sig(SIGKILL, current, 0);
64928 + return retval;
64929 + }
64930 +
64931 install_exec_creds(bprm);
64932 current->flags &= ~PF_FORKNOEXEC;
64933
64934 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
64935 + current->mm->pax_flags = 0UL;
64936 +#endif
64937 +
64938 +#ifdef CONFIG_PAX_PAGEEXEC
64939 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
64940 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
64941 +
64942 +#ifdef CONFIG_PAX_EMUTRAMP
64943 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
64944 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
64945 +#endif
64946 +
64947 +#ifdef CONFIG_PAX_MPROTECT
64948 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
64949 + current->mm->pax_flags |= MF_PAX_MPROTECT;
64950 +#endif
64951 +
64952 + }
64953 +#endif
64954 +
64955 if (N_MAGIC(ex) == OMAGIC) {
64956 unsigned long text_addr, map_size;
64957 loff_t pos;
64958 @@ -349,7 +382,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
64959
64960 down_write(&current->mm->mmap_sem);
64961 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
64962 - PROT_READ | PROT_WRITE | PROT_EXEC,
64963 + PROT_READ | PROT_WRITE,
64964 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
64965 fd_offset + ex.a_text);
64966 up_write(&current->mm->mmap_sem);
64967 @@ -367,13 +400,6 @@ beyond_if:
64968 return retval;
64969 }
64970
64971 - retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
64972 - if (retval < 0) {
64973 - /* Someone check-me: is this error path enough? */
64974 - send_sig(SIGKILL, current, 0);
64975 - return retval;
64976 - }
64977 -
64978 current->mm->start_stack =
64979 (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
64980 #ifdef __alpha__
64981 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
64982 index a64fde6..66794b9 100644
64983 --- a/fs/binfmt_elf.c
64984 +++ b/fs/binfmt_elf.c
64985 @@ -31,6 +31,7 @@
64986 #include <linux/random.h>
64987 #include <linux/elf.h>
64988 #include <linux/utsname.h>
64989 +#include <linux/xattr.h>
64990 #include <asm/uaccess.h>
64991 #include <asm/param.h>
64992 #include <asm/page.h>
64993 @@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
64994 #define elf_core_dump NULL
64995 #endif
64996
64997 +#ifdef CONFIG_PAX_MPROTECT
64998 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
64999 +#endif
65000 +
65001 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
65002 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
65003 #else
65004 @@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
65005 .load_binary = load_elf_binary,
65006 .load_shlib = load_elf_library,
65007 .core_dump = elf_core_dump,
65008 +
65009 +#ifdef CONFIG_PAX_MPROTECT
65010 + .handle_mprotect= elf_handle_mprotect,
65011 +#endif
65012 +
65013 .min_coredump = ELF_EXEC_PAGESIZE,
65014 .hasvdso = 1
65015 };
65016 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
65017
65018 static int set_brk(unsigned long start, unsigned long end)
65019 {
65020 + unsigned long e = end;
65021 +
65022 start = ELF_PAGEALIGN(start);
65023 end = ELF_PAGEALIGN(end);
65024 if (end > start) {
65025 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
65026 if (BAD_ADDR(addr))
65027 return addr;
65028 }
65029 - current->mm->start_brk = current->mm->brk = end;
65030 + current->mm->start_brk = current->mm->brk = e;
65031 return 0;
65032 }
65033
65034 @@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
65035 elf_addr_t __user *u_rand_bytes;
65036 const char *k_platform = ELF_PLATFORM;
65037 const char *k_base_platform = ELF_BASE_PLATFORM;
65038 - unsigned char k_rand_bytes[16];
65039 + u32 k_rand_bytes[4];
65040 int items;
65041 elf_addr_t *elf_info;
65042 int ei_index = 0;
65043 const struct cred *cred = current_cred();
65044 struct vm_area_struct *vma;
65045 + unsigned long saved_auxv[AT_VECTOR_SIZE];
65046 +
65047 + pax_track_stack();
65048
65049 /*
65050 * In some cases (e.g. Hyper-Threading), we want to avoid L1
65051 @@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
65052 * Generate 16 random bytes for userspace PRNG seeding.
65053 */
65054 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
65055 - u_rand_bytes = (elf_addr_t __user *)
65056 - STACK_ALLOC(p, sizeof(k_rand_bytes));
65057 + srandom32(k_rand_bytes[0] ^ random32());
65058 + srandom32(k_rand_bytes[1] ^ random32());
65059 + srandom32(k_rand_bytes[2] ^ random32());
65060 + srandom32(k_rand_bytes[3] ^ random32());
65061 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
65062 + u_rand_bytes = (elf_addr_t __user *) p;
65063 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
65064 return -EFAULT;
65065
65066 @@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
65067 return -EFAULT;
65068 current->mm->env_end = p;
65069
65070 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
65071 +
65072 /* Put the elf_info on the stack in the right place. */
65073 sp = (elf_addr_t __user *)envp + 1;
65074 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
65075 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
65076 return -EFAULT;
65077 return 0;
65078 }
65079 @@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
65080 {
65081 struct elf_phdr *elf_phdata;
65082 struct elf_phdr *eppnt;
65083 - unsigned long load_addr = 0;
65084 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
65085 int load_addr_set = 0;
65086 unsigned long last_bss = 0, elf_bss = 0;
65087 - unsigned long error = ~0UL;
65088 + unsigned long error = -EINVAL;
65089 unsigned long total_size;
65090 int retval, i, size;
65091
65092 @@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
65093 goto out_close;
65094 }
65095
65096 +#ifdef CONFIG_PAX_SEGMEXEC
65097 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
65098 + pax_task_size = SEGMEXEC_TASK_SIZE;
65099 +#endif
65100 +
65101 eppnt = elf_phdata;
65102 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
65103 if (eppnt->p_type == PT_LOAD) {
65104 @@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
65105 k = load_addr + eppnt->p_vaddr;
65106 if (BAD_ADDR(k) ||
65107 eppnt->p_filesz > eppnt->p_memsz ||
65108 - eppnt->p_memsz > TASK_SIZE ||
65109 - TASK_SIZE - eppnt->p_memsz < k) {
65110 + eppnt->p_memsz > pax_task_size ||
65111 + pax_task_size - eppnt->p_memsz < k) {
65112 error = -ENOMEM;
65113 goto out_close;
65114 }
65115 @@ -532,6 +558,351 @@ out:
65116 return error;
65117 }
65118
65119 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
65120 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
65121 +{
65122 + unsigned long pax_flags = 0UL;
65123 +
65124 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
65125 +
65126 +#ifdef CONFIG_PAX_PAGEEXEC
65127 + if (elf_phdata->p_flags & PF_PAGEEXEC)
65128 + pax_flags |= MF_PAX_PAGEEXEC;
65129 +#endif
65130 +
65131 +#ifdef CONFIG_PAX_SEGMEXEC
65132 + if (elf_phdata->p_flags & PF_SEGMEXEC)
65133 + pax_flags |= MF_PAX_SEGMEXEC;
65134 +#endif
65135 +
65136 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65137 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65138 + if (nx_enabled)
65139 + pax_flags &= ~MF_PAX_SEGMEXEC;
65140 + else
65141 + pax_flags &= ~MF_PAX_PAGEEXEC;
65142 + }
65143 +#endif
65144 +
65145 +#ifdef CONFIG_PAX_EMUTRAMP
65146 + if (elf_phdata->p_flags & PF_EMUTRAMP)
65147 + pax_flags |= MF_PAX_EMUTRAMP;
65148 +#endif
65149 +
65150 +#ifdef CONFIG_PAX_MPROTECT
65151 + if (elf_phdata->p_flags & PF_MPROTECT)
65152 + pax_flags |= MF_PAX_MPROTECT;
65153 +#endif
65154 +
65155 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
65156 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
65157 + pax_flags |= MF_PAX_RANDMMAP;
65158 +#endif
65159 +
65160 +#endif
65161 +
65162 + return pax_flags;
65163 +}
65164 +
65165 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
65166 +{
65167 + unsigned long pax_flags = 0UL;
65168 +
65169 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
65170 +
65171 +#ifdef CONFIG_PAX_PAGEEXEC
65172 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
65173 + pax_flags |= MF_PAX_PAGEEXEC;
65174 +#endif
65175 +
65176 +#ifdef CONFIG_PAX_SEGMEXEC
65177 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
65178 + pax_flags |= MF_PAX_SEGMEXEC;
65179 +#endif
65180 +
65181 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65182 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65183 + if (nx_enabled)
65184 + pax_flags &= ~MF_PAX_SEGMEXEC;
65185 + else
65186 + pax_flags &= ~MF_PAX_PAGEEXEC;
65187 + }
65188 +#endif
65189 +
65190 +#ifdef CONFIG_PAX_EMUTRAMP
65191 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
65192 + pax_flags |= MF_PAX_EMUTRAMP;
65193 +#endif
65194 +
65195 +#ifdef CONFIG_PAX_MPROTECT
65196 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
65197 + pax_flags |= MF_PAX_MPROTECT;
65198 +#endif
65199 +
65200 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
65201 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
65202 + pax_flags |= MF_PAX_RANDMMAP;
65203 +#endif
65204 +
65205 +#endif
65206 +
65207 + return pax_flags;
65208 +}
65209 +
65210 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
65211 +{
65212 + unsigned long pax_flags = 0UL;
65213 +
65214 +#ifdef CONFIG_PAX_EI_PAX
65215 +
65216 +#ifdef CONFIG_PAX_PAGEEXEC
65217 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
65218 + pax_flags |= MF_PAX_PAGEEXEC;
65219 +#endif
65220 +
65221 +#ifdef CONFIG_PAX_SEGMEXEC
65222 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
65223 + pax_flags |= MF_PAX_SEGMEXEC;
65224 +#endif
65225 +
65226 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65227 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65228 + if (nx_enabled)
65229 + pax_flags &= ~MF_PAX_SEGMEXEC;
65230 + else
65231 + pax_flags &= ~MF_PAX_PAGEEXEC;
65232 + }
65233 +#endif
65234 +
65235 +#ifdef CONFIG_PAX_EMUTRAMP
65236 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
65237 + pax_flags |= MF_PAX_EMUTRAMP;
65238 +#endif
65239 +
65240 +#ifdef CONFIG_PAX_MPROTECT
65241 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
65242 + pax_flags |= MF_PAX_MPROTECT;
65243 +#endif
65244 +
65245 +#ifdef CONFIG_PAX_ASLR
65246 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
65247 + pax_flags |= MF_PAX_RANDMMAP;
65248 +#endif
65249 +
65250 +#else
65251 +
65252 +#ifdef CONFIG_PAX_PAGEEXEC
65253 + pax_flags |= MF_PAX_PAGEEXEC;
65254 +#endif
65255 +
65256 +#ifdef CONFIG_PAX_MPROTECT
65257 + pax_flags |= MF_PAX_MPROTECT;
65258 +#endif
65259 +
65260 +#ifdef CONFIG_PAX_RANDMMAP
65261 + pax_flags |= MF_PAX_RANDMMAP;
65262 +#endif
65263 +
65264 +#ifdef CONFIG_PAX_SEGMEXEC
65265 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
65266 + pax_flags &= ~MF_PAX_PAGEEXEC;
65267 + pax_flags |= MF_PAX_SEGMEXEC;
65268 + }
65269 +#endif
65270 +
65271 +#endif
65272 +
65273 + return pax_flags;
65274 +}
65275 +
65276 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
65277 +{
65278 +
65279 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
65280 + unsigned long i;
65281 +
65282 + for (i = 0UL; i < elf_ex->e_phnum; i++)
65283 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
65284 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
65285 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
65286 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
65287 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
65288 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
65289 + return ~0UL;
65290 +
65291 +#ifdef CONFIG_PAX_SOFTMODE
65292 + if (pax_softmode)
65293 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
65294 + else
65295 +#endif
65296 +
65297 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
65298 + break;
65299 + }
65300 +#endif
65301 +
65302 + return ~0UL;
65303 +}
65304 +
65305 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
65306 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
65307 +{
65308 + unsigned long pax_flags = 0UL;
65309 +
65310 +#ifdef CONFIG_PAX_PAGEEXEC
65311 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
65312 + pax_flags |= MF_PAX_PAGEEXEC;
65313 +#endif
65314 +
65315 +#ifdef CONFIG_PAX_SEGMEXEC
65316 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
65317 + pax_flags |= MF_PAX_SEGMEXEC;
65318 +#endif
65319 +
65320 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65321 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65322 + if ((__supported_pte_mask & _PAGE_NX))
65323 + pax_flags &= ~MF_PAX_SEGMEXEC;
65324 + else
65325 + pax_flags &= ~MF_PAX_PAGEEXEC;
65326 + }
65327 +#endif
65328 +
65329 +#ifdef CONFIG_PAX_EMUTRAMP
65330 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
65331 + pax_flags |= MF_PAX_EMUTRAMP;
65332 +#endif
65333 +
65334 +#ifdef CONFIG_PAX_MPROTECT
65335 + if (pax_flags_softmode & MF_PAX_MPROTECT)
65336 + pax_flags |= MF_PAX_MPROTECT;
65337 +#endif
65338 +
65339 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
65340 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
65341 + pax_flags |= MF_PAX_RANDMMAP;
65342 +#endif
65343 +
65344 + return pax_flags;
65345 +}
65346 +
65347 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
65348 +{
65349 + unsigned long pax_flags = 0UL;
65350 +
65351 +#ifdef CONFIG_PAX_PAGEEXEC
65352 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
65353 + pax_flags |= MF_PAX_PAGEEXEC;
65354 +#endif
65355 +
65356 +#ifdef CONFIG_PAX_SEGMEXEC
65357 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
65358 + pax_flags |= MF_PAX_SEGMEXEC;
65359 +#endif
65360 +
65361 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65362 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65363 + if ((__supported_pte_mask & _PAGE_NX))
65364 + pax_flags &= ~MF_PAX_SEGMEXEC;
65365 + else
65366 + pax_flags &= ~MF_PAX_PAGEEXEC;
65367 + }
65368 +#endif
65369 +
65370 +#ifdef CONFIG_PAX_EMUTRAMP
65371 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
65372 + pax_flags |= MF_PAX_EMUTRAMP;
65373 +#endif
65374 +
65375 +#ifdef CONFIG_PAX_MPROTECT
65376 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
65377 + pax_flags |= MF_PAX_MPROTECT;
65378 +#endif
65379 +
65380 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
65381 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
65382 + pax_flags |= MF_PAX_RANDMMAP;
65383 +#endif
65384 +
65385 + return pax_flags;
65386 +}
65387 +#endif
65388 +
65389 +static unsigned long pax_parse_xattr_pax(struct file * const file)
65390 +{
65391 +
65392 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
65393 + ssize_t xattr_size, i;
65394 + unsigned char xattr_value[5];
65395 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
65396 +
65397 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
65398 + if (xattr_size <= 0)
65399 + return ~0UL;
65400 +
65401 + for (i = 0; i < xattr_size; i++)
65402 + switch (xattr_value[i]) {
65403 + default:
65404 + return ~0UL;
65405 +
65406 +#define parse_flag(option1, option2, flag) \
65407 + case option1: \
65408 + pax_flags_hardmode |= MF_PAX_##flag; \
65409 + break; \
65410 + case option2: \
65411 + pax_flags_softmode |= MF_PAX_##flag; \
65412 + break;
65413 +
65414 + parse_flag('p', 'P', PAGEEXEC);
65415 + parse_flag('e', 'E', EMUTRAMP);
65416 + parse_flag('m', 'M', MPROTECT);
65417 + parse_flag('r', 'R', RANDMMAP);
65418 + parse_flag('s', 'S', SEGMEXEC);
65419 +
65420 +#undef parse_flag
65421 + }
65422 +
65423 + if (pax_flags_hardmode & pax_flags_softmode)
65424 + return ~0UL;
65425 +
65426 +#ifdef CONFIG_PAX_SOFTMODE
65427 + if (pax_softmode)
65428 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
65429 + else
65430 +#endif
65431 +
65432 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
65433 +#else
65434 + return ~0UL;
65435 +#endif
65436 +
65437 +}
65438 +
65439 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
65440 +{
65441 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
65442 +
65443 + pax_flags = pax_parse_ei_pax(elf_ex);
65444 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
65445 + xattr_pax_flags = pax_parse_xattr_pax(file);
65446 +
65447 + if (pt_pax_flags == ~0UL)
65448 + pt_pax_flags = xattr_pax_flags;
65449 + else if (xattr_pax_flags == ~0UL)
65450 + xattr_pax_flags = pt_pax_flags;
65451 + if (pt_pax_flags != xattr_pax_flags)
65452 + return -EINVAL;
65453 + if (pt_pax_flags != ~0UL)
65454 + pax_flags = pt_pax_flags;
65455 +
65456 + if (0 > pax_check_flags(&pax_flags))
65457 + return -EINVAL;
65458 +
65459 + current->mm->pax_flags = pax_flags;
65460 + return 0;
65461 +}
65462 +#endif
65463 +
65464 /*
65465 * These are the functions used to load ELF style executables and shared
65466 * libraries. There is no binary dependent code anywhere else.
65467 @@ -548,6 +919,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
65468 {
65469 unsigned int random_variable = 0;
65470
65471 +#ifdef CONFIG_PAX_RANDUSTACK
65472 + if (randomize_va_space)
65473 + return stack_top - current->mm->delta_stack;
65474 +#endif
65475 +
65476 if ((current->flags & PF_RANDOMIZE) &&
65477 !(current->personality & ADDR_NO_RANDOMIZE)) {
65478 random_variable = get_random_int() & STACK_RND_MASK;
65479 @@ -566,7 +942,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65480 unsigned long load_addr = 0, load_bias = 0;
65481 int load_addr_set = 0;
65482 char * elf_interpreter = NULL;
65483 - unsigned long error;
65484 + unsigned long error = 0;
65485 struct elf_phdr *elf_ppnt, *elf_phdata;
65486 unsigned long elf_bss, elf_brk;
65487 int retval, i;
65488 @@ -576,11 +952,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65489 unsigned long start_code, end_code, start_data, end_data;
65490 unsigned long reloc_func_desc = 0;
65491 int executable_stack = EXSTACK_DEFAULT;
65492 - unsigned long def_flags = 0;
65493 struct {
65494 struct elfhdr elf_ex;
65495 struct elfhdr interp_elf_ex;
65496 } *loc;
65497 + unsigned long pax_task_size = TASK_SIZE;
65498
65499 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
65500 if (!loc) {
65501 @@ -718,11 +1094,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65502
65503 /* OK, This is the point of no return */
65504 current->flags &= ~PF_FORKNOEXEC;
65505 - current->mm->def_flags = def_flags;
65506 +
65507 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65508 + current->mm->pax_flags = 0UL;
65509 +#endif
65510 +
65511 +#ifdef CONFIG_PAX_DLRESOLVE
65512 + current->mm->call_dl_resolve = 0UL;
65513 +#endif
65514 +
65515 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
65516 + current->mm->call_syscall = 0UL;
65517 +#endif
65518 +
65519 +#ifdef CONFIG_PAX_ASLR
65520 + current->mm->delta_mmap = 0UL;
65521 + current->mm->delta_stack = 0UL;
65522 +#endif
65523 +
65524 + current->mm->def_flags = 0;
65525 +
65526 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
65527 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
65528 + send_sig(SIGKILL, current, 0);
65529 + goto out_free_dentry;
65530 + }
65531 +#endif
65532 +
65533 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
65534 + pax_set_initial_flags(bprm);
65535 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
65536 + if (pax_set_initial_flags_func)
65537 + (pax_set_initial_flags_func)(bprm);
65538 +#endif
65539 +
65540 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65541 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
65542 + current->mm->context.user_cs_limit = PAGE_SIZE;
65543 + current->mm->def_flags |= VM_PAGEEXEC;
65544 + }
65545 +#endif
65546 +
65547 +#ifdef CONFIG_PAX_SEGMEXEC
65548 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
65549 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
65550 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
65551 + pax_task_size = SEGMEXEC_TASK_SIZE;
65552 + }
65553 +#endif
65554 +
65555 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
65556 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65557 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
65558 + put_cpu();
65559 + }
65560 +#endif
65561
65562 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
65563 may depend on the personality. */
65564 SET_PERSONALITY(loc->elf_ex);
65565 +
65566 +#ifdef CONFIG_PAX_ASLR
65567 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
65568 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
65569 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
65570 + }
65571 +#endif
65572 +
65573 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
65574 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65575 + executable_stack = EXSTACK_DISABLE_X;
65576 + current->personality &= ~READ_IMPLIES_EXEC;
65577 + } else
65578 +#endif
65579 +
65580 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
65581 current->personality |= READ_IMPLIES_EXEC;
65582
65583 @@ -800,10 +1245,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65584 * might try to exec. This is because the brk will
65585 * follow the loader, and is not movable. */
65586 #ifdef CONFIG_X86
65587 - load_bias = 0;
65588 + if (current->flags & PF_RANDOMIZE)
65589 + load_bias = 0;
65590 + else
65591 + load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
65592 #else
65593 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
65594 #endif
65595 +
65596 +#ifdef CONFIG_PAX_RANDMMAP
65597 + /* PaX: randomize base address at the default exe base if requested */
65598 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
65599 +#ifdef CONFIG_SPARC64
65600 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
65601 +#else
65602 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
65603 +#endif
65604 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
65605 + elf_flags |= MAP_FIXED;
65606 + }
65607 +#endif
65608 +
65609 }
65610
65611 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
65612 @@ -836,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65613 * allowed task size. Note that p_filesz must always be
65614 * <= p_memsz so it is only necessary to check p_memsz.
65615 */
65616 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
65617 - elf_ppnt->p_memsz > TASK_SIZE ||
65618 - TASK_SIZE - elf_ppnt->p_memsz < k) {
65619 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
65620 + elf_ppnt->p_memsz > pax_task_size ||
65621 + pax_task_size - elf_ppnt->p_memsz < k) {
65622 /* set_brk can never work. Avoid overflows. */
65623 send_sig(SIGKILL, current, 0);
65624 retval = -EINVAL;
65625 @@ -866,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65626 start_data += load_bias;
65627 end_data += load_bias;
65628
65629 +#ifdef CONFIG_PAX_RANDMMAP
65630 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
65631 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
65632 +#endif
65633 +
65634 /* Calling set_brk effectively mmaps the pages that we need
65635 * for the bss and break sections. We must do this before
65636 * mapping in the interpreter, to make sure it doesn't wind
65637 @@ -877,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65638 goto out_free_dentry;
65639 }
65640 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
65641 - send_sig(SIGSEGV, current, 0);
65642 - retval = -EFAULT; /* Nobody gets to see this, but.. */
65643 - goto out_free_dentry;
65644 + /*
65645 + * This bss-zeroing can fail if the ELF
65646 + * file specifies odd protections. So
65647 + * we don't check the return value
65648 + */
65649 }
65650
65651 if (elf_interpreter) {
65652 @@ -1112,8 +1581,10 @@ static int dump_seek(struct file *file, loff_t off)
65653 unsigned long n = off;
65654 if (n > PAGE_SIZE)
65655 n = PAGE_SIZE;
65656 - if (!dump_write(file, buf, n))
65657 + if (!dump_write(file, buf, n)) {
65658 + free_page((unsigned long)buf);
65659 return 0;
65660 + }
65661 off -= n;
65662 }
65663 free_page((unsigned long)buf);
65664 @@ -1125,7 +1596,7 @@ static int dump_seek(struct file *file, loff_t off)
65665 * Decide what to dump of a segment, part, all or none.
65666 */
65667 static unsigned long vma_dump_size(struct vm_area_struct *vma,
65668 - unsigned long mm_flags)
65669 + unsigned long mm_flags, long signr)
65670 {
65671 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
65672
65673 @@ -1159,7 +1630,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
65674 if (vma->vm_file == NULL)
65675 return 0;
65676
65677 - if (FILTER(MAPPED_PRIVATE))
65678 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
65679 goto whole;
65680
65681 /*
65682 @@ -1255,8 +1726,11 @@ static int writenote(struct memelfnote *men, struct file *file,
65683 #undef DUMP_WRITE
65684
65685 #define DUMP_WRITE(addr, nr) \
65686 + do { \
65687 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
65688 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
65689 - goto end_coredump;
65690 + goto end_coredump; \
65691 + } while (0);
65692
65693 static void fill_elf_header(struct elfhdr *elf, int segs,
65694 u16 machine, u32 flags, u8 osabi)
65695 @@ -1385,9 +1859,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
65696 {
65697 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
65698 int i = 0;
65699 - do
65700 + do {
65701 i += 2;
65702 - while (auxv[i - 2] != AT_NULL);
65703 + } while (auxv[i - 2] != AT_NULL);
65704 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
65705 }
65706
65707 @@ -1973,7 +2447,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
65708 phdr.p_offset = offset;
65709 phdr.p_vaddr = vma->vm_start;
65710 phdr.p_paddr = 0;
65711 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
65712 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
65713 phdr.p_memsz = vma->vm_end - vma->vm_start;
65714 offset += phdr.p_filesz;
65715 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
65716 @@ -2006,7 +2480,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
65717 unsigned long addr;
65718 unsigned long end;
65719
65720 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
65721 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
65722
65723 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
65724 struct page *page;
65725 @@ -2015,6 +2489,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
65726 page = get_dump_page(addr);
65727 if (page) {
65728 void *kaddr = kmap(page);
65729 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
65730 stop = ((size += PAGE_SIZE) > limit) ||
65731 !dump_write(file, kaddr, PAGE_SIZE);
65732 kunmap(page);
65733 @@ -2042,6 +2517,97 @@ out:
65734
65735 #endif /* USE_ELF_CORE_DUMP */
65736
65737 +#ifdef CONFIG_PAX_MPROTECT
65738 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
65739 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
65740 + * we'll remove VM_MAYWRITE for good on RELRO segments.
65741 + *
65742 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
65743 + * basis because we want to allow the common case and not the special ones.
65744 + */
65745 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
65746 +{
65747 + struct elfhdr elf_h;
65748 + struct elf_phdr elf_p;
65749 + unsigned long i;
65750 + unsigned long oldflags;
65751 + bool is_textrel_rw, is_textrel_rx, is_relro;
65752 +
65753 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
65754 + return;
65755 +
65756 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
65757 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
65758 +
65759 +#ifdef CONFIG_PAX_ELFRELOCS
65760 + /* possible TEXTREL */
65761 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
65762 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
65763 +#else
65764 + is_textrel_rw = false;
65765 + is_textrel_rx = false;
65766 +#endif
65767 +
65768 + /* possible RELRO */
65769 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
65770 +
65771 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
65772 + return;
65773 +
65774 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
65775 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
65776 +
65777 +#ifdef CONFIG_PAX_ETEXECRELOCS
65778 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
65779 +#else
65780 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
65781 +#endif
65782 +
65783 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
65784 + !elf_check_arch(&elf_h) ||
65785 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
65786 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
65787 + return;
65788 +
65789 + for (i = 0UL; i < elf_h.e_phnum; i++) {
65790 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
65791 + return;
65792 + switch (elf_p.p_type) {
65793 + case PT_DYNAMIC:
65794 + if (!is_textrel_rw && !is_textrel_rx)
65795 + continue;
65796 + i = 0UL;
65797 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
65798 + elf_dyn dyn;
65799 +
65800 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
65801 + return;
65802 + if (dyn.d_tag == DT_NULL)
65803 + return;
65804 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
65805 + gr_log_textrel(vma);
65806 + if (is_textrel_rw)
65807 + vma->vm_flags |= VM_MAYWRITE;
65808 + else
65809 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
65810 + vma->vm_flags &= ~VM_MAYWRITE;
65811 + return;
65812 + }
65813 + i++;
65814 + }
65815 + return;
65816 +
65817 + case PT_GNU_RELRO:
65818 + if (!is_relro)
65819 + continue;
65820 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
65821 + vma->vm_flags &= ~VM_MAYWRITE;
65822 + return;
65823 + }
65824 + }
65825 +}
65826 +#endif
65827 +
65828 static int __init init_elf_binfmt(void)
65829 {
65830 return register_binfmt(&elf_format);
65831 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
65832 index ca88c46..f155a60 100644
65833 --- a/fs/binfmt_flat.c
65834 +++ b/fs/binfmt_flat.c
65835 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
65836 realdatastart = (unsigned long) -ENOMEM;
65837 printk("Unable to allocate RAM for process data, errno %d\n",
65838 (int)-realdatastart);
65839 + down_write(&current->mm->mmap_sem);
65840 do_munmap(current->mm, textpos, text_len);
65841 + up_write(&current->mm->mmap_sem);
65842 ret = realdatastart;
65843 goto err;
65844 }
65845 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
65846 }
65847 if (IS_ERR_VALUE(result)) {
65848 printk("Unable to read data+bss, errno %d\n", (int)-result);
65849 + down_write(&current->mm->mmap_sem);
65850 do_munmap(current->mm, textpos, text_len);
65851 do_munmap(current->mm, realdatastart, data_len + extra);
65852 + up_write(&current->mm->mmap_sem);
65853 ret = result;
65854 goto err;
65855 }
65856 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
65857 }
65858 if (IS_ERR_VALUE(result)) {
65859 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
65860 + down_write(&current->mm->mmap_sem);
65861 do_munmap(current->mm, textpos, text_len + data_len + extra +
65862 MAX_SHARED_LIBS * sizeof(unsigned long));
65863 + up_write(&current->mm->mmap_sem);
65864 ret = result;
65865 goto err;
65866 }
65867 diff --git a/fs/bio.c b/fs/bio.c
65868 index e696713..83de133 100644
65869 --- a/fs/bio.c
65870 +++ b/fs/bio.c
65871 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
65872
65873 i = 0;
65874 while (i < bio_slab_nr) {
65875 - struct bio_slab *bslab = &bio_slabs[i];
65876 + bslab = &bio_slabs[i];
65877
65878 if (!bslab->slab && entry == -1)
65879 entry = i;
65880 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
65881 const int read = bio_data_dir(bio) == READ;
65882 struct bio_map_data *bmd = bio->bi_private;
65883 int i;
65884 - char *p = bmd->sgvecs[0].iov_base;
65885 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
65886
65887 __bio_for_each_segment(bvec, bio, i, 0) {
65888 char *addr = page_address(bvec->bv_page);
65889 diff --git a/fs/block_dev.c b/fs/block_dev.c
65890 index e65efa2..04fae57 100644
65891 --- a/fs/block_dev.c
65892 +++ b/fs/block_dev.c
65893 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
65894 else if (bdev->bd_contains == bdev)
65895 res = 0; /* is a whole device which isn't held */
65896
65897 - else if (bdev->bd_contains->bd_holder == bd_claim)
65898 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
65899 res = 0; /* is a partition of a device that is being partitioned */
65900 else if (bdev->bd_contains->bd_holder != NULL)
65901 res = -EBUSY; /* is a partition of a held device */
65902 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
65903 index c4bc570..42acd8d 100644
65904 --- a/fs/btrfs/ctree.c
65905 +++ b/fs/btrfs/ctree.c
65906 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
65907 free_extent_buffer(buf);
65908 add_root_to_dirty_list(root);
65909 } else {
65910 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
65911 - parent_start = parent->start;
65912 - else
65913 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
65914 + if (parent)
65915 + parent_start = parent->start;
65916 + else
65917 + parent_start = 0;
65918 + } else
65919 parent_start = 0;
65920
65921 WARN_ON(trans->transid != btrfs_header_generation(parent));
65922 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
65923
65924 ret = 0;
65925 if (slot == 0) {
65926 - struct btrfs_disk_key disk_key;
65927 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
65928 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
65929 }
65930 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
65931 index f447188..59c17c5 100644
65932 --- a/fs/btrfs/disk-io.c
65933 +++ b/fs/btrfs/disk-io.c
65934 @@ -39,7 +39,7 @@
65935 #include "tree-log.h"
65936 #include "free-space-cache.h"
65937
65938 -static struct extent_io_ops btree_extent_io_ops;
65939 +static const struct extent_io_ops btree_extent_io_ops;
65940 static void end_workqueue_fn(struct btrfs_work *work);
65941 static void free_fs_root(struct btrfs_root *root);
65942
65943 @@ -2607,7 +2607,7 @@ out:
65944 return 0;
65945 }
65946
65947 -static struct extent_io_ops btree_extent_io_ops = {
65948 +static const struct extent_io_ops btree_extent_io_ops = {
65949 .write_cache_pages_lock_hook = btree_lock_page_hook,
65950 .readpage_end_io_hook = btree_readpage_end_io_hook,
65951 .submit_bio_hook = btree_submit_bio_hook,
65952 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
65953 index 559f724..a026171 100644
65954 --- a/fs/btrfs/extent-tree.c
65955 +++ b/fs/btrfs/extent-tree.c
65956 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
65957 u64 group_start = group->key.objectid;
65958 new_extents = kmalloc(sizeof(*new_extents),
65959 GFP_NOFS);
65960 + if (!new_extents) {
65961 + ret = -ENOMEM;
65962 + goto out;
65963 + }
65964 nr_extents = 1;
65965 ret = get_new_locations(reloc_inode,
65966 extent_key,
65967 diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
65968 index 36de250..7ec75c7 100644
65969 --- a/fs/btrfs/extent_io.h
65970 +++ b/fs/btrfs/extent_io.h
65971 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
65972 struct bio *bio, int mirror_num,
65973 unsigned long bio_flags);
65974 struct extent_io_ops {
65975 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
65976 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
65977 u64 start, u64 end, int *page_started,
65978 unsigned long *nr_written);
65979 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
65980 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
65981 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
65982 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
65983 extent_submit_bio_hook_t *submit_bio_hook;
65984 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
65985 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
65986 size_t size, struct bio *bio,
65987 unsigned long bio_flags);
65988 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
65989 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
65990 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
65991 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
65992 u64 start, u64 end,
65993 struct extent_state *state);
65994 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
65995 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
65996 u64 start, u64 end,
65997 struct extent_state *state);
65998 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
65999 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
66000 struct extent_state *state);
66001 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
66002 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
66003 struct extent_state *state, int uptodate);
66004 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
66005 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
66006 unsigned long old, unsigned long bits);
66007 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
66008 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
66009 unsigned long bits);
66010 - int (*merge_extent_hook)(struct inode *inode,
66011 + int (* const merge_extent_hook)(struct inode *inode,
66012 struct extent_state *new,
66013 struct extent_state *other);
66014 - int (*split_extent_hook)(struct inode *inode,
66015 + int (* const split_extent_hook)(struct inode *inode,
66016 struct extent_state *orig, u64 split);
66017 - int (*write_cache_pages_lock_hook)(struct page *page);
66018 + int (* const write_cache_pages_lock_hook)(struct page *page);
66019 };
66020
66021 struct extent_io_tree {
66022 @@ -88,7 +88,7 @@ struct extent_io_tree {
66023 u64 dirty_bytes;
66024 spinlock_t lock;
66025 spinlock_t buffer_lock;
66026 - struct extent_io_ops *ops;
66027 + const struct extent_io_ops *ops;
66028 };
66029
66030 struct extent_state {
66031 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
66032 index cb2849f..3718fb4 100644
66033 --- a/fs/btrfs/free-space-cache.c
66034 +++ b/fs/btrfs/free-space-cache.c
66035 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
66036
66037 while(1) {
66038 if (entry->bytes < bytes || entry->offset < min_start) {
66039 - struct rb_node *node;
66040 -
66041 node = rb_next(&entry->offset_index);
66042 if (!node)
66043 break;
66044 @@ -1226,7 +1224,7 @@ again:
66045 */
66046 while (entry->bitmap || found_bitmap ||
66047 (!entry->bitmap && entry->bytes < min_bytes)) {
66048 - struct rb_node *node = rb_next(&entry->offset_index);
66049 + node = rb_next(&entry->offset_index);
66050
66051 if (entry->bitmap && entry->bytes > bytes + empty_size) {
66052 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
66053 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
66054 index e03a836..323837e 100644
66055 --- a/fs/btrfs/inode.c
66056 +++ b/fs/btrfs/inode.c
66057 @@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
66058 static const struct address_space_operations btrfs_aops;
66059 static const struct address_space_operations btrfs_symlink_aops;
66060 static const struct file_operations btrfs_dir_file_operations;
66061 -static struct extent_io_ops btrfs_extent_io_ops;
66062 +static const struct extent_io_ops btrfs_extent_io_ops;
66063
66064 static struct kmem_cache *btrfs_inode_cachep;
66065 struct kmem_cache *btrfs_trans_handle_cachep;
66066 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
66067 1, 0, NULL, GFP_NOFS);
66068 while (start < end) {
66069 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
66070 + BUG_ON(!async_cow);
66071 async_cow->inode = inode;
66072 async_cow->root = root;
66073 async_cow->locked_page = locked_page;
66074 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
66075 inline_size = btrfs_file_extent_inline_item_len(leaf,
66076 btrfs_item_nr(leaf, path->slots[0]));
66077 tmp = kmalloc(inline_size, GFP_NOFS);
66078 + if (!tmp)
66079 + return -ENOMEM;
66080 ptr = btrfs_file_extent_inline_start(item);
66081
66082 read_extent_buffer(leaf, tmp, ptr, inline_size);
66083 @@ -5410,7 +5413,7 @@ fail:
66084 return -ENOMEM;
66085 }
66086
66087 -static int btrfs_getattr(struct vfsmount *mnt,
66088 +int btrfs_getattr(struct vfsmount *mnt,
66089 struct dentry *dentry, struct kstat *stat)
66090 {
66091 struct inode *inode = dentry->d_inode;
66092 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
66093 return 0;
66094 }
66095
66096 +EXPORT_SYMBOL(btrfs_getattr);
66097 +
66098 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
66099 +{
66100 + return BTRFS_I(inode)->root->anon_super.s_dev;
66101 +}
66102 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
66103 +
66104 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
66105 struct inode *new_dir, struct dentry *new_dentry)
66106 {
66107 @@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
66108 .fsync = btrfs_sync_file,
66109 };
66110
66111 -static struct extent_io_ops btrfs_extent_io_ops = {
66112 +static const struct extent_io_ops btrfs_extent_io_ops = {
66113 .fill_delalloc = run_delalloc_range,
66114 .submit_bio_hook = btrfs_submit_bio_hook,
66115 .merge_bio_hook = btrfs_merge_bio_hook,
66116 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
66117 index ab7ab53..94e0781 100644
66118 --- a/fs/btrfs/relocation.c
66119 +++ b/fs/btrfs/relocation.c
66120 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
66121 }
66122 spin_unlock(&rc->reloc_root_tree.lock);
66123
66124 - BUG_ON((struct btrfs_root *)node->data != root);
66125 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
66126
66127 if (!del) {
66128 spin_lock(&rc->reloc_root_tree.lock);
66129 diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
66130 index a240b6f..4ce16ef 100644
66131 --- a/fs/btrfs/sysfs.c
66132 +++ b/fs/btrfs/sysfs.c
66133 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
66134 complete(&root->kobj_unregister);
66135 }
66136
66137 -static struct sysfs_ops btrfs_super_attr_ops = {
66138 +static const struct sysfs_ops btrfs_super_attr_ops = {
66139 .show = btrfs_super_attr_show,
66140 .store = btrfs_super_attr_store,
66141 };
66142
66143 -static struct sysfs_ops btrfs_root_attr_ops = {
66144 +static const struct sysfs_ops btrfs_root_attr_ops = {
66145 .show = btrfs_root_attr_show,
66146 .store = btrfs_root_attr_store,
66147 };
66148 diff --git a/fs/buffer.c b/fs/buffer.c
66149 index 6fa5302..395d9f6 100644
66150 --- a/fs/buffer.c
66151 +++ b/fs/buffer.c
66152 @@ -25,6 +25,7 @@
66153 #include <linux/percpu.h>
66154 #include <linux/slab.h>
66155 #include <linux/capability.h>
66156 +#include <linux/security.h>
66157 #include <linux/blkdev.h>
66158 #include <linux/file.h>
66159 #include <linux/quotaops.h>
66160 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
66161 index 3797e00..ce776f6 100644
66162 --- a/fs/cachefiles/bind.c
66163 +++ b/fs/cachefiles/bind.c
66164 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
66165 args);
66166
66167 /* start by checking things over */
66168 - ASSERT(cache->fstop_percent >= 0 &&
66169 - cache->fstop_percent < cache->fcull_percent &&
66170 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
66171 cache->fcull_percent < cache->frun_percent &&
66172 cache->frun_percent < 100);
66173
66174 - ASSERT(cache->bstop_percent >= 0 &&
66175 - cache->bstop_percent < cache->bcull_percent &&
66176 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
66177 cache->bcull_percent < cache->brun_percent &&
66178 cache->brun_percent < 100);
66179
66180 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
66181 index 4618516..bb30d01 100644
66182 --- a/fs/cachefiles/daemon.c
66183 +++ b/fs/cachefiles/daemon.c
66184 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
66185 if (test_bit(CACHEFILES_DEAD, &cache->flags))
66186 return -EIO;
66187
66188 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
66189 + if (datalen > PAGE_SIZE - 1)
66190 return -EOPNOTSUPP;
66191
66192 /* drag the command string into the kernel so we can parse it */
66193 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
66194 if (args[0] != '%' || args[1] != '\0')
66195 return -EINVAL;
66196
66197 - if (fstop < 0 || fstop >= cache->fcull_percent)
66198 + if (fstop >= cache->fcull_percent)
66199 return cachefiles_daemon_range_error(cache, args);
66200
66201 cache->fstop_percent = fstop;
66202 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
66203 if (args[0] != '%' || args[1] != '\0')
66204 return -EINVAL;
66205
66206 - if (bstop < 0 || bstop >= cache->bcull_percent)
66207 + if (bstop >= cache->bcull_percent)
66208 return cachefiles_daemon_range_error(cache, args);
66209
66210 cache->bstop_percent = bstop;
66211 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
66212 index f7c255f..fcd61de 100644
66213 --- a/fs/cachefiles/internal.h
66214 +++ b/fs/cachefiles/internal.h
66215 @@ -56,7 +56,7 @@ struct cachefiles_cache {
66216 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
66217 struct rb_root active_nodes; /* active nodes (can't be culled) */
66218 rwlock_t active_lock; /* lock for active_nodes */
66219 - atomic_t gravecounter; /* graveyard uniquifier */
66220 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
66221 unsigned frun_percent; /* when to stop culling (% files) */
66222 unsigned fcull_percent; /* when to start culling (% files) */
66223 unsigned fstop_percent; /* when to stop allocating (% files) */
66224 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
66225 * proc.c
66226 */
66227 #ifdef CONFIG_CACHEFILES_HISTOGRAM
66228 -extern atomic_t cachefiles_lookup_histogram[HZ];
66229 -extern atomic_t cachefiles_mkdir_histogram[HZ];
66230 -extern atomic_t cachefiles_create_histogram[HZ];
66231 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
66232 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
66233 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
66234
66235 extern int __init cachefiles_proc_init(void);
66236 extern void cachefiles_proc_cleanup(void);
66237 static inline
66238 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
66239 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
66240 {
66241 unsigned long jif = jiffies - start_jif;
66242 if (jif >= HZ)
66243 jif = HZ - 1;
66244 - atomic_inc(&histogram[jif]);
66245 + atomic_inc_unchecked(&histogram[jif]);
66246 }
66247
66248 #else
66249 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
66250 index 14ac480..a62766c 100644
66251 --- a/fs/cachefiles/namei.c
66252 +++ b/fs/cachefiles/namei.c
66253 @@ -250,7 +250,7 @@ try_again:
66254 /* first step is to make up a grave dentry in the graveyard */
66255 sprintf(nbuffer, "%08x%08x",
66256 (uint32_t) get_seconds(),
66257 - (uint32_t) atomic_inc_return(&cache->gravecounter));
66258 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
66259
66260 /* do the multiway lock magic */
66261 trap = lock_rename(cache->graveyard, dir);
66262 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
66263 index eccd339..4c1d995 100644
66264 --- a/fs/cachefiles/proc.c
66265 +++ b/fs/cachefiles/proc.c
66266 @@ -14,9 +14,9 @@
66267 #include <linux/seq_file.h>
66268 #include "internal.h"
66269
66270 -atomic_t cachefiles_lookup_histogram[HZ];
66271 -atomic_t cachefiles_mkdir_histogram[HZ];
66272 -atomic_t cachefiles_create_histogram[HZ];
66273 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
66274 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
66275 +atomic_unchecked_t cachefiles_create_histogram[HZ];
66276
66277 /*
66278 * display the latency histogram
66279 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
66280 return 0;
66281 default:
66282 index = (unsigned long) v - 3;
66283 - x = atomic_read(&cachefiles_lookup_histogram[index]);
66284 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
66285 - z = atomic_read(&cachefiles_create_histogram[index]);
66286 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
66287 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
66288 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
66289 if (x == 0 && y == 0 && z == 0)
66290 return 0;
66291
66292 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
66293 index a6c8c6f..5cf8517 100644
66294 --- a/fs/cachefiles/rdwr.c
66295 +++ b/fs/cachefiles/rdwr.c
66296 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
66297 old_fs = get_fs();
66298 set_fs(KERNEL_DS);
66299 ret = file->f_op->write(
66300 - file, (const void __user *) data, len, &pos);
66301 + file, (const void __force_user *) data, len, &pos);
66302 set_fs(old_fs);
66303 kunmap(page);
66304 if (ret != len)
66305 diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
66306 index 20692fb..0098fb7 100644
66307 --- a/fs/cifs/asn1.c
66308 +++ b/fs/cifs/asn1.c
66309 @@ -416,6 +416,9 @@ asn1_subid_decode(struct asn1_ctx *ctx, unsigned long *subid)
66310
66311 static int
66312 asn1_oid_decode(struct asn1_ctx *ctx,
66313 + unsigned char *eoc, unsigned long **oid, unsigned int *len) __size_overflow(2);
66314 +static int
66315 +asn1_oid_decode(struct asn1_ctx *ctx,
66316 unsigned char *eoc, unsigned long **oid, unsigned int *len)
66317 {
66318 unsigned long subid;
66319 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
66320 index 42cec2a..2aba466 100644
66321 --- a/fs/cifs/cifs_debug.c
66322 +++ b/fs/cifs/cifs_debug.c
66323 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
66324 tcon = list_entry(tmp3,
66325 struct cifsTconInfo,
66326 tcon_list);
66327 - atomic_set(&tcon->num_smbs_sent, 0);
66328 - atomic_set(&tcon->num_writes, 0);
66329 - atomic_set(&tcon->num_reads, 0);
66330 - atomic_set(&tcon->num_oplock_brks, 0);
66331 - atomic_set(&tcon->num_opens, 0);
66332 - atomic_set(&tcon->num_posixopens, 0);
66333 - atomic_set(&tcon->num_posixmkdirs, 0);
66334 - atomic_set(&tcon->num_closes, 0);
66335 - atomic_set(&tcon->num_deletes, 0);
66336 - atomic_set(&tcon->num_mkdirs, 0);
66337 - atomic_set(&tcon->num_rmdirs, 0);
66338 - atomic_set(&tcon->num_renames, 0);
66339 - atomic_set(&tcon->num_t2renames, 0);
66340 - atomic_set(&tcon->num_ffirst, 0);
66341 - atomic_set(&tcon->num_fnext, 0);
66342 - atomic_set(&tcon->num_fclose, 0);
66343 - atomic_set(&tcon->num_hardlinks, 0);
66344 - atomic_set(&tcon->num_symlinks, 0);
66345 - atomic_set(&tcon->num_locks, 0);
66346 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
66347 + atomic_set_unchecked(&tcon->num_writes, 0);
66348 + atomic_set_unchecked(&tcon->num_reads, 0);
66349 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
66350 + atomic_set_unchecked(&tcon->num_opens, 0);
66351 + atomic_set_unchecked(&tcon->num_posixopens, 0);
66352 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
66353 + atomic_set_unchecked(&tcon->num_closes, 0);
66354 + atomic_set_unchecked(&tcon->num_deletes, 0);
66355 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
66356 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
66357 + atomic_set_unchecked(&tcon->num_renames, 0);
66358 + atomic_set_unchecked(&tcon->num_t2renames, 0);
66359 + atomic_set_unchecked(&tcon->num_ffirst, 0);
66360 + atomic_set_unchecked(&tcon->num_fnext, 0);
66361 + atomic_set_unchecked(&tcon->num_fclose, 0);
66362 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
66363 + atomic_set_unchecked(&tcon->num_symlinks, 0);
66364 + atomic_set_unchecked(&tcon->num_locks, 0);
66365 }
66366 }
66367 }
66368 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
66369 if (tcon->need_reconnect)
66370 seq_puts(m, "\tDISCONNECTED ");
66371 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
66372 - atomic_read(&tcon->num_smbs_sent),
66373 - atomic_read(&tcon->num_oplock_brks));
66374 + atomic_read_unchecked(&tcon->num_smbs_sent),
66375 + atomic_read_unchecked(&tcon->num_oplock_brks));
66376 seq_printf(m, "\nReads: %d Bytes: %lld",
66377 - atomic_read(&tcon->num_reads),
66378 + atomic_read_unchecked(&tcon->num_reads),
66379 (long long)(tcon->bytes_read));
66380 seq_printf(m, "\nWrites: %d Bytes: %lld",
66381 - atomic_read(&tcon->num_writes),
66382 + atomic_read_unchecked(&tcon->num_writes),
66383 (long long)(tcon->bytes_written));
66384 seq_printf(m, "\nFlushes: %d",
66385 - atomic_read(&tcon->num_flushes));
66386 + atomic_read_unchecked(&tcon->num_flushes));
66387 seq_printf(m, "\nLocks: %d HardLinks: %d "
66388 "Symlinks: %d",
66389 - atomic_read(&tcon->num_locks),
66390 - atomic_read(&tcon->num_hardlinks),
66391 - atomic_read(&tcon->num_symlinks));
66392 + atomic_read_unchecked(&tcon->num_locks),
66393 + atomic_read_unchecked(&tcon->num_hardlinks),
66394 + atomic_read_unchecked(&tcon->num_symlinks));
66395 seq_printf(m, "\nOpens: %d Closes: %d "
66396 "Deletes: %d",
66397 - atomic_read(&tcon->num_opens),
66398 - atomic_read(&tcon->num_closes),
66399 - atomic_read(&tcon->num_deletes));
66400 + atomic_read_unchecked(&tcon->num_opens),
66401 + atomic_read_unchecked(&tcon->num_closes),
66402 + atomic_read_unchecked(&tcon->num_deletes));
66403 seq_printf(m, "\nPosix Opens: %d "
66404 "Posix Mkdirs: %d",
66405 - atomic_read(&tcon->num_posixopens),
66406 - atomic_read(&tcon->num_posixmkdirs));
66407 + atomic_read_unchecked(&tcon->num_posixopens),
66408 + atomic_read_unchecked(&tcon->num_posixmkdirs));
66409 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
66410 - atomic_read(&tcon->num_mkdirs),
66411 - atomic_read(&tcon->num_rmdirs));
66412 + atomic_read_unchecked(&tcon->num_mkdirs),
66413 + atomic_read_unchecked(&tcon->num_rmdirs));
66414 seq_printf(m, "\nRenames: %d T2 Renames %d",
66415 - atomic_read(&tcon->num_renames),
66416 - atomic_read(&tcon->num_t2renames));
66417 + atomic_read_unchecked(&tcon->num_renames),
66418 + atomic_read_unchecked(&tcon->num_t2renames));
66419 seq_printf(m, "\nFindFirst: %d FNext %d "
66420 "FClose %d",
66421 - atomic_read(&tcon->num_ffirst),
66422 - atomic_read(&tcon->num_fnext),
66423 - atomic_read(&tcon->num_fclose));
66424 + atomic_read_unchecked(&tcon->num_ffirst),
66425 + atomic_read_unchecked(&tcon->num_fnext),
66426 + atomic_read_unchecked(&tcon->num_fclose));
66427 }
66428 }
66429 }
66430 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
66431 index 1445407..68cb0dc 100644
66432 --- a/fs/cifs/cifsfs.c
66433 +++ b/fs/cifs/cifsfs.c
66434 @@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
66435 cifs_req_cachep = kmem_cache_create("cifs_request",
66436 CIFSMaxBufSize +
66437 MAX_CIFS_HDR_SIZE, 0,
66438 - SLAB_HWCACHE_ALIGN, NULL);
66439 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
66440 if (cifs_req_cachep == NULL)
66441 return -ENOMEM;
66442
66443 @@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
66444 efficient to alloc 1 per page off the slab compared to 17K (5page)
66445 alloc of large cifs buffers even when page debugging is on */
66446 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
66447 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
66448 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
66449 NULL);
66450 if (cifs_sm_req_cachep == NULL) {
66451 mempool_destroy(cifs_req_poolp);
66452 @@ -991,8 +991,8 @@ init_cifs(void)
66453 atomic_set(&bufAllocCount, 0);
66454 atomic_set(&smBufAllocCount, 0);
66455 #ifdef CONFIG_CIFS_STATS2
66456 - atomic_set(&totBufAllocCount, 0);
66457 - atomic_set(&totSmBufAllocCount, 0);
66458 + atomic_set_unchecked(&totBufAllocCount, 0);
66459 + atomic_set_unchecked(&totSmBufAllocCount, 0);
66460 #endif /* CONFIG_CIFS_STATS2 */
66461
66462 atomic_set(&midCount, 0);
66463 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
66464 index e29581e..1c22bab 100644
66465 --- a/fs/cifs/cifsglob.h
66466 +++ b/fs/cifs/cifsglob.h
66467 @@ -252,28 +252,28 @@ struct cifsTconInfo {
66468 __u16 Flags; /* optional support bits */
66469 enum statusEnum tidStatus;
66470 #ifdef CONFIG_CIFS_STATS
66471 - atomic_t num_smbs_sent;
66472 - atomic_t num_writes;
66473 - atomic_t num_reads;
66474 - atomic_t num_flushes;
66475 - atomic_t num_oplock_brks;
66476 - atomic_t num_opens;
66477 - atomic_t num_closes;
66478 - atomic_t num_deletes;
66479 - atomic_t num_mkdirs;
66480 - atomic_t num_posixopens;
66481 - atomic_t num_posixmkdirs;
66482 - atomic_t num_rmdirs;
66483 - atomic_t num_renames;
66484 - atomic_t num_t2renames;
66485 - atomic_t num_ffirst;
66486 - atomic_t num_fnext;
66487 - atomic_t num_fclose;
66488 - atomic_t num_hardlinks;
66489 - atomic_t num_symlinks;
66490 - atomic_t num_locks;
66491 - atomic_t num_acl_get;
66492 - atomic_t num_acl_set;
66493 + atomic_unchecked_t num_smbs_sent;
66494 + atomic_unchecked_t num_writes;
66495 + atomic_unchecked_t num_reads;
66496 + atomic_unchecked_t num_flushes;
66497 + atomic_unchecked_t num_oplock_brks;
66498 + atomic_unchecked_t num_opens;
66499 + atomic_unchecked_t num_closes;
66500 + atomic_unchecked_t num_deletes;
66501 + atomic_unchecked_t num_mkdirs;
66502 + atomic_unchecked_t num_posixopens;
66503 + atomic_unchecked_t num_posixmkdirs;
66504 + atomic_unchecked_t num_rmdirs;
66505 + atomic_unchecked_t num_renames;
66506 + atomic_unchecked_t num_t2renames;
66507 + atomic_unchecked_t num_ffirst;
66508 + atomic_unchecked_t num_fnext;
66509 + atomic_unchecked_t num_fclose;
66510 + atomic_unchecked_t num_hardlinks;
66511 + atomic_unchecked_t num_symlinks;
66512 + atomic_unchecked_t num_locks;
66513 + atomic_unchecked_t num_acl_get;
66514 + atomic_unchecked_t num_acl_set;
66515 #ifdef CONFIG_CIFS_STATS2
66516 unsigned long long time_writes;
66517 unsigned long long time_reads;
66518 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
66519 }
66520
66521 #ifdef CONFIG_CIFS_STATS
66522 -#define cifs_stats_inc atomic_inc
66523 +#define cifs_stats_inc atomic_inc_unchecked
66524
66525 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
66526 unsigned int bytes)
66527 @@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
66528 /* Various Debug counters */
66529 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
66530 #ifdef CONFIG_CIFS_STATS2
66531 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
66532 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
66533 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
66534 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
66535 #endif
66536 GLOBAL_EXTERN atomic_t smBufAllocCount;
66537 GLOBAL_EXTERN atomic_t midCount;
66538 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
66539 index fc1e048..28b3441 100644
66540 --- a/fs/cifs/link.c
66541 +++ b/fs/cifs/link.c
66542 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
66543
66544 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
66545 {
66546 - char *p = nd_get_link(nd);
66547 + const char *p = nd_get_link(nd);
66548 if (!IS_ERR(p))
66549 kfree(p);
66550 }
66551 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
66552 index 95b82e8..12a538d 100644
66553 --- a/fs/cifs/misc.c
66554 +++ b/fs/cifs/misc.c
66555 @@ -155,7 +155,7 @@ cifs_buf_get(void)
66556 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
66557 atomic_inc(&bufAllocCount);
66558 #ifdef CONFIG_CIFS_STATS2
66559 - atomic_inc(&totBufAllocCount);
66560 + atomic_inc_unchecked(&totBufAllocCount);
66561 #endif /* CONFIG_CIFS_STATS2 */
66562 }
66563
66564 @@ -190,7 +190,7 @@ cifs_small_buf_get(void)
66565 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
66566 atomic_inc(&smBufAllocCount);
66567 #ifdef CONFIG_CIFS_STATS2
66568 - atomic_inc(&totSmBufAllocCount);
66569 + atomic_inc_unchecked(&totSmBufAllocCount);
66570 #endif /* CONFIG_CIFS_STATS2 */
66571
66572 }
66573 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
66574 index a5bf577..6d19845 100644
66575 --- a/fs/coda/cache.c
66576 +++ b/fs/coda/cache.c
66577 @@ -24,14 +24,14 @@
66578 #include <linux/coda_fs_i.h>
66579 #include <linux/coda_cache.h>
66580
66581 -static atomic_t permission_epoch = ATOMIC_INIT(0);
66582 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
66583
66584 /* replace or extend an acl cache hit */
66585 void coda_cache_enter(struct inode *inode, int mask)
66586 {
66587 struct coda_inode_info *cii = ITOC(inode);
66588
66589 - cii->c_cached_epoch = atomic_read(&permission_epoch);
66590 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
66591 if (cii->c_uid != current_fsuid()) {
66592 cii->c_uid = current_fsuid();
66593 cii->c_cached_perm = mask;
66594 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
66595 void coda_cache_clear_inode(struct inode *inode)
66596 {
66597 struct coda_inode_info *cii = ITOC(inode);
66598 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
66599 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
66600 }
66601
66602 /* remove all acl caches */
66603 void coda_cache_clear_all(struct super_block *sb)
66604 {
66605 - atomic_inc(&permission_epoch);
66606 + atomic_inc_unchecked(&permission_epoch);
66607 }
66608
66609
66610 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
66611
66612 hit = (mask & cii->c_cached_perm) == mask &&
66613 cii->c_uid == current_fsuid() &&
66614 - cii->c_cached_epoch == atomic_read(&permission_epoch);
66615 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
66616
66617 return hit;
66618 }
66619 diff --git a/fs/compat.c b/fs/compat.c
66620 index d1e2411..9a958d2 100644
66621 --- a/fs/compat.c
66622 +++ b/fs/compat.c
66623 @@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
66624 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
66625 {
66626 compat_ino_t ino = stat->ino;
66627 - typeof(ubuf->st_uid) uid = 0;
66628 - typeof(ubuf->st_gid) gid = 0;
66629 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
66630 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
66631 int err;
66632
66633 SET_UID(uid, stat->uid);
66634 @@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
66635
66636 set_fs(KERNEL_DS);
66637 /* The __user pointer cast is valid because of the set_fs() */
66638 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
66639 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
66640 set_fs(oldfs);
66641 /* truncating is ok because it's a user address */
66642 if (!ret)
66643 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
66644
66645 struct compat_readdir_callback {
66646 struct compat_old_linux_dirent __user *dirent;
66647 + struct file * file;
66648 int result;
66649 };
66650
66651 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
66652 buf->result = -EOVERFLOW;
66653 return -EOVERFLOW;
66654 }
66655 +
66656 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
66657 + return 0;
66658 +
66659 buf->result++;
66660 dirent = buf->dirent;
66661 if (!access_ok(VERIFY_WRITE, dirent,
66662 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
66663
66664 buf.result = 0;
66665 buf.dirent = dirent;
66666 + buf.file = file;
66667
66668 error = vfs_readdir(file, compat_fillonedir, &buf);
66669 if (buf.result)
66670 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
66671 struct compat_getdents_callback {
66672 struct compat_linux_dirent __user *current_dir;
66673 struct compat_linux_dirent __user *previous;
66674 + struct file * file;
66675 int count;
66676 int error;
66677 };
66678 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
66679 buf->error = -EOVERFLOW;
66680 return -EOVERFLOW;
66681 }
66682 +
66683 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
66684 + return 0;
66685 +
66686 dirent = buf->previous;
66687 if (dirent) {
66688 if (__put_user(offset, &dirent->d_off))
66689 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
66690 buf.previous = NULL;
66691 buf.count = count;
66692 buf.error = 0;
66693 + buf.file = file;
66694
66695 error = vfs_readdir(file, compat_filldir, &buf);
66696 if (error >= 0)
66697 @@ -987,6 +999,7 @@ out:
66698 struct compat_getdents_callback64 {
66699 struct linux_dirent64 __user *current_dir;
66700 struct linux_dirent64 __user *previous;
66701 + struct file * file;
66702 int count;
66703 int error;
66704 };
66705 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
66706 buf->error = -EINVAL; /* only used if we fail.. */
66707 if (reclen > buf->count)
66708 return -EINVAL;
66709 +
66710 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
66711 + return 0;
66712 +
66713 dirent = buf->previous;
66714
66715 if (dirent) {
66716 @@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
66717 buf.previous = NULL;
66718 buf.count = count;
66719 buf.error = 0;
66720 + buf.file = file;
66721
66722 error = vfs_readdir(file, compat_filldir64, &buf);
66723 if (error >= 0)
66724 error = buf.error;
66725 lastdirent = buf.previous;
66726 if (lastdirent) {
66727 - typeof(lastdirent->d_off) d_off = file->f_pos;
66728 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
66729 if (__put_user_unaligned(d_off, &lastdirent->d_off))
66730 error = -EFAULT;
66731 else
66732 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
66733 * verify all the pointers
66734 */
66735 ret = -EINVAL;
66736 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
66737 + if (nr_segs > UIO_MAXIOV)
66738 goto out;
66739 if (!file->f_op)
66740 goto out;
66741 @@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
66742 compat_uptr_t __user *envp,
66743 struct pt_regs * regs)
66744 {
66745 +#ifdef CONFIG_GRKERNSEC
66746 + struct file *old_exec_file;
66747 + struct acl_subject_label *old_acl;
66748 + struct rlimit old_rlim[RLIM_NLIMITS];
66749 +#endif
66750 struct linux_binprm *bprm;
66751 struct file *file;
66752 struct files_struct *displaced;
66753 bool clear_in_exec;
66754 int retval;
66755 + const struct cred *cred = current_cred();
66756 +
66757 + /*
66758 + * We move the actual failure in case of RLIMIT_NPROC excess from
66759 + * set*uid() to execve() because too many poorly written programs
66760 + * don't check setuid() return code. Here we additionally recheck
66761 + * whether NPROC limit is still exceeded.
66762 + */
66763 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
66764 +
66765 + if ((current->flags & PF_NPROC_EXCEEDED) &&
66766 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
66767 + retval = -EAGAIN;
66768 + goto out_ret;
66769 + }
66770 +
66771 + /* We're below the limit (still or again), so we don't want to make
66772 + * further execve() calls fail. */
66773 + current->flags &= ~PF_NPROC_EXCEEDED;
66774
66775 retval = unshare_files(&displaced);
66776 if (retval)
66777 @@ -1493,12 +1535,26 @@ int compat_do_execve(char * filename,
66778 if (IS_ERR(file))
66779 goto out_unmark;
66780
66781 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
66782 + retval = -EPERM;
66783 + goto out_file;
66784 + }
66785 +
66786 sched_exec();
66787
66788 bprm->file = file;
66789 bprm->filename = filename;
66790 bprm->interp = filename;
66791
66792 + if (gr_process_user_ban()) {
66793 + retval = -EPERM;
66794 + goto out_file;
66795 + }
66796 +
66797 + retval = -EACCES;
66798 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
66799 + goto out_file;
66800 +
66801 retval = bprm_mm_init(bprm);
66802 if (retval)
66803 goto out_file;
66804 @@ -1515,24 +1571,63 @@ int compat_do_execve(char * filename,
66805 if (retval < 0)
66806 goto out;
66807
66808 +#ifdef CONFIG_GRKERNSEC
66809 + old_acl = current->acl;
66810 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
66811 + old_exec_file = current->exec_file;
66812 + get_file(file);
66813 + current->exec_file = file;
66814 +#endif
66815 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66816 + /* limit suid stack to 8MB
66817 + we saved the old limits above and will restore them if this exec fails
66818 + */
66819 + if ((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid()))
66820 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
66821 +#endif
66822 +
66823 + if (!gr_tpe_allow(file)) {
66824 + retval = -EACCES;
66825 + goto out_fail;
66826 + }
66827 +
66828 + if (gr_check_crash_exec(file)) {
66829 + retval = -EACCES;
66830 + goto out_fail;
66831 + }
66832 +
66833 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
66834 + bprm->unsafe);
66835 + if (retval < 0)
66836 + goto out_fail;
66837 +
66838 retval = copy_strings_kernel(1, &bprm->filename, bprm);
66839 if (retval < 0)
66840 - goto out;
66841 + goto out_fail;
66842
66843 bprm->exec = bprm->p;
66844 retval = compat_copy_strings(bprm->envc, envp, bprm);
66845 if (retval < 0)
66846 - goto out;
66847 + goto out_fail;
66848
66849 retval = compat_copy_strings(bprm->argc, argv, bprm);
66850 if (retval < 0)
66851 - goto out;
66852 + goto out_fail;
66853 +
66854 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
66855 +
66856 + gr_handle_exec_args_compat(bprm, argv);
66857
66858 retval = search_binary_handler(bprm, regs);
66859 if (retval < 0)
66860 - goto out;
66861 + goto out_fail;
66862 +#ifdef CONFIG_GRKERNSEC
66863 + if (old_exec_file)
66864 + fput(old_exec_file);
66865 +#endif
66866
66867 /* execve succeeded */
66868 + increment_exec_counter();
66869 current->fs->in_exec = 0;
66870 current->in_execve = 0;
66871 acct_update_integrals(current);
66872 @@ -1541,6 +1636,14 @@ int compat_do_execve(char * filename,
66873 put_files_struct(displaced);
66874 return retval;
66875
66876 +out_fail:
66877 +#ifdef CONFIG_GRKERNSEC
66878 + current->acl = old_acl;
66879 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
66880 + fput(current->exec_file);
66881 + current->exec_file = old_exec_file;
66882 +#endif
66883 +
66884 out:
66885 if (bprm->mm) {
66886 acct_arg_size(bprm, 0);
66887 @@ -1711,6 +1814,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
66888 struct fdtable *fdt;
66889 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
66890
66891 + pax_track_stack();
66892 +
66893 if (n < 0)
66894 goto out_nofds;
66895
66896 @@ -2151,7 +2256,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
66897 oldfs = get_fs();
66898 set_fs(KERNEL_DS);
66899 /* The __user pointer casts are valid because of the set_fs() */
66900 - err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
66901 + err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
66902 set_fs(oldfs);
66903
66904 if (err)
66905 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
66906 index 0adced2..bbb1b0d 100644
66907 --- a/fs/compat_binfmt_elf.c
66908 +++ b/fs/compat_binfmt_elf.c
66909 @@ -29,10 +29,12 @@
66910 #undef elfhdr
66911 #undef elf_phdr
66912 #undef elf_note
66913 +#undef elf_dyn
66914 #undef elf_addr_t
66915 #define elfhdr elf32_hdr
66916 #define elf_phdr elf32_phdr
66917 #define elf_note elf32_note
66918 +#define elf_dyn Elf32_Dyn
66919 #define elf_addr_t Elf32_Addr
66920
66921 /*
66922 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
66923 index d84e705..d8c364c 100644
66924 --- a/fs/compat_ioctl.c
66925 +++ b/fs/compat_ioctl.c
66926 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
66927 up = (struct compat_video_spu_palette __user *) arg;
66928 err = get_user(palp, &up->palette);
66929 err |= get_user(length, &up->length);
66930 + if (err)
66931 + return -EFAULT;
66932
66933 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
66934 err = put_user(compat_ptr(palp), &up_native->palette);
66935 @@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
66936 return -EFAULT;
66937 if (__get_user(udata, &ss32->iomem_base))
66938 return -EFAULT;
66939 - ss.iomem_base = compat_ptr(udata);
66940 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
66941 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
66942 __get_user(ss.port_high, &ss32->port_high))
66943 return -EFAULT;
66944 @@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
66945 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
66946 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
66947 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
66948 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
66949 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
66950 return -EFAULT;
66951
66952 return ioctl_preallocate(file, p);
66953 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
66954 index 8e48b52..f01ed91 100644
66955 --- a/fs/configfs/dir.c
66956 +++ b/fs/configfs/dir.c
66957 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
66958 }
66959 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
66960 struct configfs_dirent *next;
66961 - const char * name;
66962 + const unsigned char * name;
66963 + char d_name[sizeof(next->s_dentry->d_iname)];
66964 int len;
66965
66966 next = list_entry(p, struct configfs_dirent,
66967 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
66968 continue;
66969
66970 name = configfs_get_name(next);
66971 - len = strlen(name);
66972 + if (next->s_dentry && name == next->s_dentry->d_iname) {
66973 + len = next->s_dentry->d_name.len;
66974 + memcpy(d_name, name, len);
66975 + name = d_name;
66976 + } else
66977 + len = strlen(name);
66978 if (next->s_dentry)
66979 ino = next->s_dentry->d_inode->i_ino;
66980 else
66981 diff --git a/fs/dcache.c b/fs/dcache.c
66982 index 44c0aea..2529092 100644
66983 --- a/fs/dcache.c
66984 +++ b/fs/dcache.c
66985 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
66986
66987 static struct kmem_cache *dentry_cache __read_mostly;
66988
66989 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
66990 -
66991 /*
66992 * This is the single most critical data structure when it comes
66993 * to the dcache: the hashtable for lookups. Somebody should try
66994 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
66995 mempages -= reserve;
66996
66997 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
66998 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
66999 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
67000
67001 dcache_init();
67002 inode_init();
67003 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
67004 index 39c6ee8..dcee0f1 100644
67005 --- a/fs/debugfs/inode.c
67006 +++ b/fs/debugfs/inode.c
67007 @@ -269,7 +269,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
67008 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
67009 {
67010 return debugfs_create_file(name,
67011 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67012 + S_IFDIR | S_IRWXU,
67013 +#else
67014 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
67015 +#endif
67016 parent, NULL, NULL);
67017 }
67018 EXPORT_SYMBOL_GPL(debugfs_create_dir);
67019 diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
67020 index c010ecf..a8d8c59 100644
67021 --- a/fs/dlm/lockspace.c
67022 +++ b/fs/dlm/lockspace.c
67023 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
67024 kfree(ls);
67025 }
67026
67027 -static struct sysfs_ops dlm_attr_ops = {
67028 +static const struct sysfs_ops dlm_attr_ops = {
67029 .show = dlm_attr_show,
67030 .store = dlm_attr_store,
67031 };
67032 diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
67033 index 7e164bb..62fa913 100644
67034 --- a/fs/ecryptfs/crypto.c
67035 +++ b/fs/ecryptfs/crypto.c
67036 @@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
67037 rc);
67038 goto out;
67039 }
67040 - if (unlikely(ecryptfs_verbosity > 0)) {
67041 - ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
67042 - "with iv:\n");
67043 - ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
67044 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
67045 - "encryption:\n");
67046 - ecryptfs_dump_hex((char *)
67047 - (page_address(page)
67048 - + (extent_offset * crypt_stat->extent_size)),
67049 - 8);
67050 - }
67051 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
67052 page, (extent_offset
67053 * crypt_stat->extent_size),
67054 @@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
67055 goto out;
67056 }
67057 rc = 0;
67058 - if (unlikely(ecryptfs_verbosity > 0)) {
67059 - ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
67060 - "rc = [%d]\n", (extent_base + extent_offset),
67061 - rc);
67062 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
67063 - "encryption:\n");
67064 - ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
67065 - }
67066 out:
67067 return rc;
67068 }
67069 @@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
67070 rc);
67071 goto out;
67072 }
67073 - if (unlikely(ecryptfs_verbosity > 0)) {
67074 - ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
67075 - "with iv:\n");
67076 - ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
67077 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
67078 - "decryption:\n");
67079 - ecryptfs_dump_hex((char *)
67080 - (page_address(enc_extent_page)
67081 - + (extent_offset * crypt_stat->extent_size)),
67082 - 8);
67083 - }
67084 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
67085 (extent_offset
67086 * crypt_stat->extent_size),
67087 @@ -569,16 +539,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
67088 goto out;
67089 }
67090 rc = 0;
67091 - if (unlikely(ecryptfs_verbosity > 0)) {
67092 - ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
67093 - "rc = [%d]\n", (extent_base + extent_offset),
67094 - rc);
67095 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
67096 - "decryption:\n");
67097 - ecryptfs_dump_hex((char *)(page_address(page)
67098 - + (extent_offset
67099 - * crypt_stat->extent_size)), 8);
67100 - }
67101 out:
67102 return rc;
67103 }
67104 diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
67105 index 502b09f..49129f4 100644
67106 --- a/fs/ecryptfs/file.c
67107 +++ b/fs/ecryptfs/file.c
67108 @@ -348,7 +348,6 @@ const struct file_operations ecryptfs_main_fops = {
67109 #ifdef CONFIG_COMPAT
67110 .compat_ioctl = ecryptfs_compat_ioctl,
67111 #endif
67112 - .mmap = generic_file_mmap,
67113 .open = ecryptfs_open,
67114 .flush = ecryptfs_flush,
67115 .release = ecryptfs_release,
67116 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
67117 index 90a6087..fa05803 100644
67118 --- a/fs/ecryptfs/inode.c
67119 +++ b/fs/ecryptfs/inode.c
67120 @@ -647,7 +647,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
67121 old_fs = get_fs();
67122 set_fs(get_ds());
67123 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
67124 - (char __user *)lower_buf,
67125 + (char __force_user *)lower_buf,
67126 lower_bufsiz);
67127 set_fs(old_fs);
67128 if (rc < 0)
67129 @@ -693,7 +693,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
67130 }
67131 old_fs = get_fs();
67132 set_fs(get_ds());
67133 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
67134 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
67135 set_fs(old_fs);
67136 if (rc < 0)
67137 goto out_free;
67138 diff --git a/fs/exec.c b/fs/exec.c
67139 index 86fafc6..6a109b9 100644
67140 --- a/fs/exec.c
67141 +++ b/fs/exec.c
67142 @@ -56,12 +56,28 @@
67143 #include <linux/fsnotify.h>
67144 #include <linux/fs_struct.h>
67145 #include <linux/pipe_fs_i.h>
67146 +#include <linux/random.h>
67147 +#include <linux/seq_file.h>
67148 +
67149 +#ifdef CONFIG_PAX_REFCOUNT
67150 +#include <linux/kallsyms.h>
67151 +#include <linux/kdebug.h>
67152 +#endif
67153
67154 #include <asm/uaccess.h>
67155 #include <asm/mmu_context.h>
67156 #include <asm/tlb.h>
67157 #include "internal.h"
67158
67159 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
67160 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
67161 +#endif
67162 +
67163 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
67164 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
67165 +EXPORT_SYMBOL(pax_set_initial_flags_func);
67166 +#endif
67167 +
67168 int core_uses_pid;
67169 char core_pattern[CORENAME_MAX_SIZE] = "core";
67170 unsigned int core_pipe_limit;
67171 @@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
67172 int write)
67173 {
67174 struct page *page;
67175 - int ret;
67176
67177 -#ifdef CONFIG_STACK_GROWSUP
67178 - if (write) {
67179 - ret = expand_stack_downwards(bprm->vma, pos);
67180 - if (ret < 0)
67181 - return NULL;
67182 - }
67183 -#endif
67184 - ret = get_user_pages(current, bprm->mm, pos,
67185 - 1, write, 1, &page, NULL);
67186 - if (ret <= 0)
67187 + if (0 > expand_stack_downwards(bprm->vma, pos))
67188 + return NULL;
67189 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
67190 return NULL;
67191
67192 if (write) {
67193 @@ -205,6 +213,17 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
67194 if (size <= ARG_MAX)
67195 return page;
67196
67197 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67198 + // only allow 512KB for argv+env on suid/sgid binaries
67199 + // to prevent easy ASLR exhaustion
67200 + if (((bprm->cred->euid != current_euid()) ||
67201 + (bprm->cred->egid != current_egid())) &&
67202 + (size > (512 * 1024))) {
67203 + put_page(page);
67204 + return NULL;
67205 + }
67206 +#endif
67207 +
67208 /*
67209 * Limit to 1/4-th the stack size for the argv+env strings.
67210 * This ensures that:
67211 @@ -263,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
67212 vma->vm_end = STACK_TOP_MAX;
67213 vma->vm_start = vma->vm_end - PAGE_SIZE;
67214 vma->vm_flags = VM_STACK_FLAGS;
67215 +
67216 +#ifdef CONFIG_PAX_SEGMEXEC
67217 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
67218 +#endif
67219 +
67220 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
67221
67222 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
67223 @@ -276,6 +300,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
67224 mm->stack_vm = mm->total_vm = 1;
67225 up_write(&mm->mmap_sem);
67226 bprm->p = vma->vm_end - sizeof(void *);
67227 +
67228 +#ifdef CONFIG_PAX_RANDUSTACK
67229 + if (randomize_va_space)
67230 + bprm->p ^= random32() & ~PAGE_MASK;
67231 +#endif
67232 +
67233 return 0;
67234 err:
67235 up_write(&mm->mmap_sem);
67236 @@ -510,7 +540,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
67237 int r;
67238 mm_segment_t oldfs = get_fs();
67239 set_fs(KERNEL_DS);
67240 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
67241 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
67242 set_fs(oldfs);
67243 return r;
67244 }
67245 @@ -540,7 +570,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
67246 unsigned long new_end = old_end - shift;
67247 struct mmu_gather *tlb;
67248
67249 - BUG_ON(new_start > new_end);
67250 + if (new_start >= new_end || new_start < mmap_min_addr)
67251 + return -ENOMEM;
67252
67253 /*
67254 * ensure there are no vmas between where we want to go
67255 @@ -549,6 +580,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
67256 if (vma != find_vma(mm, new_start))
67257 return -EFAULT;
67258
67259 +#ifdef CONFIG_PAX_SEGMEXEC
67260 + BUG_ON(pax_find_mirror_vma(vma));
67261 +#endif
67262 +
67263 /*
67264 * cover the whole range: [new_start, old_end)
67265 */
67266 @@ -630,10 +665,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
67267 stack_top = arch_align_stack(stack_top);
67268 stack_top = PAGE_ALIGN(stack_top);
67269
67270 - if (unlikely(stack_top < mmap_min_addr) ||
67271 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
67272 - return -ENOMEM;
67273 -
67274 stack_shift = vma->vm_end - stack_top;
67275
67276 bprm->p -= stack_shift;
67277 @@ -645,6 +676,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
67278 bprm->exec -= stack_shift;
67279
67280 down_write(&mm->mmap_sem);
67281 +
67282 + /* Move stack pages down in memory. */
67283 + if (stack_shift) {
67284 + ret = shift_arg_pages(vma, stack_shift);
67285 + if (ret)
67286 + goto out_unlock;
67287 + }
67288 +
67289 vm_flags = VM_STACK_FLAGS;
67290
67291 /*
67292 @@ -658,19 +697,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
67293 vm_flags &= ~VM_EXEC;
67294 vm_flags |= mm->def_flags;
67295
67296 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
67297 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
67298 + vm_flags &= ~VM_EXEC;
67299 +
67300 +#ifdef CONFIG_PAX_MPROTECT
67301 + if (mm->pax_flags & MF_PAX_MPROTECT)
67302 + vm_flags &= ~VM_MAYEXEC;
67303 +#endif
67304 +
67305 + }
67306 +#endif
67307 +
67308 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
67309 vm_flags);
67310 if (ret)
67311 goto out_unlock;
67312 BUG_ON(prev != vma);
67313
67314 - /* Move stack pages down in memory. */
67315 - if (stack_shift) {
67316 - ret = shift_arg_pages(vma, stack_shift);
67317 - if (ret)
67318 - goto out_unlock;
67319 - }
67320 -
67321 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
67322 stack_size = vma->vm_end - vma->vm_start;
67323 /*
67324 @@ -744,7 +788,7 @@ int kernel_read(struct file *file, loff_t offset,
67325 old_fs = get_fs();
67326 set_fs(get_ds());
67327 /* The cast to a user pointer is valid due to the set_fs() */
67328 - result = vfs_read(file, (void __user *)addr, count, &pos);
67329 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
67330 set_fs(old_fs);
67331 return result;
67332 }
67333 @@ -985,6 +1029,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
67334 perf_event_comm(tsk);
67335 }
67336
67337 +static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
67338 +{
67339 + int i, ch;
67340 +
67341 + /* Copies the binary name from after last slash */
67342 + for (i = 0; (ch = *(fn++)) != '\0';) {
67343 + if (ch == '/')
67344 + i = 0; /* overwrite what we wrote */
67345 + else
67346 + if (i < len - 1)
67347 + tcomm[i++] = ch;
67348 + }
67349 + tcomm[i] = '\0';
67350 +}
67351 +
67352 int flush_old_exec(struct linux_binprm * bprm)
67353 {
67354 int retval;
67355 @@ -999,6 +1058,7 @@ int flush_old_exec(struct linux_binprm * bprm)
67356
67357 set_mm_exe_file(bprm->mm, bprm->file);
67358
67359 + filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
67360 /*
67361 * Release all of the old mmap stuff
67362 */
67363 @@ -1023,10 +1083,6 @@ EXPORT_SYMBOL(flush_old_exec);
67364
67365 void setup_new_exec(struct linux_binprm * bprm)
67366 {
67367 - int i, ch;
67368 - char * name;
67369 - char tcomm[sizeof(current->comm)];
67370 -
67371 arch_pick_mmap_layout(current->mm);
67372
67373 /* This is the point of no return */
67374 @@ -1037,18 +1093,7 @@ void setup_new_exec(struct linux_binprm * bprm)
67375 else
67376 set_dumpable(current->mm, suid_dumpable);
67377
67378 - name = bprm->filename;
67379 -
67380 - /* Copies the binary name from after last slash */
67381 - for (i=0; (ch = *(name++)) != '\0';) {
67382 - if (ch == '/')
67383 - i = 0; /* overwrite what we wrote */
67384 - else
67385 - if (i < (sizeof(tcomm) - 1))
67386 - tcomm[i++] = ch;
67387 - }
67388 - tcomm[i] = '\0';
67389 - set_task_comm(current, tcomm);
67390 + set_task_comm(current, bprm->tcomm);
67391
67392 /* Set the new mm task size. We have to do that late because it may
67393 * depend on TIF_32BIT which is only updated in flush_thread() on
67394 @@ -1152,7 +1197,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
67395 }
67396 rcu_read_unlock();
67397
67398 - if (p->fs->users > n_fs) {
67399 + if (atomic_read(&p->fs->users) > n_fs) {
67400 bprm->unsafe |= LSM_UNSAFE_SHARE;
67401 } else {
67402 res = -EAGAIN;
67403 @@ -1339,6 +1384,21 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
67404
67405 EXPORT_SYMBOL(search_binary_handler);
67406
67407 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67408 +DEFINE_PER_CPU(u64, exec_counter);
67409 +static int __init init_exec_counters(void)
67410 +{
67411 + unsigned int cpu;
67412 +
67413 + for_each_possible_cpu(cpu) {
67414 + per_cpu(exec_counter, cpu) = (u64)cpu;
67415 + }
67416 +
67417 + return 0;
67418 +}
67419 +early_initcall(init_exec_counters);
67420 +#endif
67421 +
67422 /*
67423 * sys_execve() executes a new program.
67424 */
67425 @@ -1347,11 +1407,35 @@ int do_execve(char * filename,
67426 char __user *__user *envp,
67427 struct pt_regs * regs)
67428 {
67429 +#ifdef CONFIG_GRKERNSEC
67430 + struct file *old_exec_file;
67431 + struct acl_subject_label *old_acl;
67432 + struct rlimit old_rlim[RLIM_NLIMITS];
67433 +#endif
67434 struct linux_binprm *bprm;
67435 struct file *file;
67436 struct files_struct *displaced;
67437 bool clear_in_exec;
67438 int retval;
67439 + const struct cred *cred = current_cred();
67440 +
67441 + /*
67442 + * We move the actual failure in case of RLIMIT_NPROC excess from
67443 + * set*uid() to execve() because too many poorly written programs
67444 + * don't check setuid() return code. Here we additionally recheck
67445 + * whether NPROC limit is still exceeded.
67446 + */
67447 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
67448 +
67449 + if ((current->flags & PF_NPROC_EXCEEDED) &&
67450 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
67451 + retval = -EAGAIN;
67452 + goto out_ret;
67453 + }
67454 +
67455 + /* We're below the limit (still or again), so we don't want to make
67456 + * further execve() calls fail. */
67457 + current->flags &= ~PF_NPROC_EXCEEDED;
67458
67459 retval = unshare_files(&displaced);
67460 if (retval)
67461 @@ -1377,12 +1461,27 @@ int do_execve(char * filename,
67462 if (IS_ERR(file))
67463 goto out_unmark;
67464
67465 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
67466 + retval = -EPERM;
67467 + goto out_file;
67468 + }
67469 +
67470 sched_exec();
67471
67472 bprm->file = file;
67473 bprm->filename = filename;
67474 bprm->interp = filename;
67475
67476 + if (gr_process_user_ban()) {
67477 + retval = -EPERM;
67478 + goto out_file;
67479 + }
67480 +
67481 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
67482 + retval = -EACCES;
67483 + goto out_file;
67484 + }
67485 +
67486 retval = bprm_mm_init(bprm);
67487 if (retval)
67488 goto out_file;
67489 @@ -1399,25 +1498,66 @@ int do_execve(char * filename,
67490 if (retval < 0)
67491 goto out;
67492
67493 +#ifdef CONFIG_GRKERNSEC
67494 + old_acl = current->acl;
67495 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
67496 + old_exec_file = current->exec_file;
67497 + get_file(file);
67498 + current->exec_file = file;
67499 +#endif
67500 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67501 + /* limit suid stack to 8MB
67502 + we saved the old limits above and will restore them if this exec fails
67503 + */
67504 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
67505 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
67506 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
67507 +#endif
67508 +
67509 + if (!gr_tpe_allow(file)) {
67510 + retval = -EACCES;
67511 + goto out_fail;
67512 + }
67513 +
67514 + if (gr_check_crash_exec(file)) {
67515 + retval = -EACCES;
67516 + goto out_fail;
67517 + }
67518 +
67519 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
67520 + bprm->unsafe);
67521 + if (retval < 0)
67522 + goto out_fail;
67523 +
67524 retval = copy_strings_kernel(1, &bprm->filename, bprm);
67525 if (retval < 0)
67526 - goto out;
67527 + goto out_fail;
67528
67529 bprm->exec = bprm->p;
67530 retval = copy_strings(bprm->envc, envp, bprm);
67531 if (retval < 0)
67532 - goto out;
67533 + goto out_fail;
67534
67535 retval = copy_strings(bprm->argc, argv, bprm);
67536 if (retval < 0)
67537 - goto out;
67538 + goto out_fail;
67539 +
67540 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
67541 +
67542 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
67543
67544 current->flags &= ~PF_KTHREAD;
67545 retval = search_binary_handler(bprm,regs);
67546 if (retval < 0)
67547 - goto out;
67548 + goto out_fail;
67549 +#ifdef CONFIG_GRKERNSEC
67550 + if (old_exec_file)
67551 + fput(old_exec_file);
67552 +#endif
67553
67554 /* execve succeeded */
67555 +
67556 + increment_exec_counter();
67557 current->fs->in_exec = 0;
67558 current->in_execve = 0;
67559 acct_update_integrals(current);
67560 @@ -1426,6 +1566,14 @@ int do_execve(char * filename,
67561 put_files_struct(displaced);
67562 return retval;
67563
67564 +out_fail:
67565 +#ifdef CONFIG_GRKERNSEC
67566 + current->acl = old_acl;
67567 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
67568 + fput(current->exec_file);
67569 + current->exec_file = old_exec_file;
67570 +#endif
67571 +
67572 out:
67573 if (bprm->mm) {
67574 acct_arg_size(bprm, 0);
67575 @@ -1591,6 +1739,229 @@ out:
67576 return ispipe;
67577 }
67578
67579 +int pax_check_flags(unsigned long *flags)
67580 +{
67581 + int retval = 0;
67582 +
67583 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
67584 + if (*flags & MF_PAX_SEGMEXEC)
67585 + {
67586 + *flags &= ~MF_PAX_SEGMEXEC;
67587 + retval = -EINVAL;
67588 + }
67589 +#endif
67590 +
67591 + if ((*flags & MF_PAX_PAGEEXEC)
67592 +
67593 +#ifdef CONFIG_PAX_PAGEEXEC
67594 + && (*flags & MF_PAX_SEGMEXEC)
67595 +#endif
67596 +
67597 + )
67598 + {
67599 + *flags &= ~MF_PAX_PAGEEXEC;
67600 + retval = -EINVAL;
67601 + }
67602 +
67603 + if ((*flags & MF_PAX_MPROTECT)
67604 +
67605 +#ifdef CONFIG_PAX_MPROTECT
67606 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
67607 +#endif
67608 +
67609 + )
67610 + {
67611 + *flags &= ~MF_PAX_MPROTECT;
67612 + retval = -EINVAL;
67613 + }
67614 +
67615 + if ((*flags & MF_PAX_EMUTRAMP)
67616 +
67617 +#ifdef CONFIG_PAX_EMUTRAMP
67618 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
67619 +#endif
67620 +
67621 + )
67622 + {
67623 + *flags &= ~MF_PAX_EMUTRAMP;
67624 + retval = -EINVAL;
67625 + }
67626 +
67627 + return retval;
67628 +}
67629 +
67630 +EXPORT_SYMBOL(pax_check_flags);
67631 +
67632 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
67633 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
67634 +{
67635 + struct task_struct *tsk = current;
67636 + struct mm_struct *mm = current->mm;
67637 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
67638 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
67639 + char *path_exec = NULL;
67640 + char *path_fault = NULL;
67641 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
67642 +
67643 + if (buffer_exec && buffer_fault) {
67644 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
67645 +
67646 + down_read(&mm->mmap_sem);
67647 + vma = mm->mmap;
67648 + while (vma && (!vma_exec || !vma_fault)) {
67649 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
67650 + vma_exec = vma;
67651 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
67652 + vma_fault = vma;
67653 + vma = vma->vm_next;
67654 + }
67655 + if (vma_exec) {
67656 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
67657 + if (IS_ERR(path_exec))
67658 + path_exec = "<path too long>";
67659 + else {
67660 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
67661 + if (path_exec) {
67662 + *path_exec = 0;
67663 + path_exec = buffer_exec;
67664 + } else
67665 + path_exec = "<path too long>";
67666 + }
67667 + }
67668 + if (vma_fault) {
67669 + start = vma_fault->vm_start;
67670 + end = vma_fault->vm_end;
67671 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
67672 + if (vma_fault->vm_file) {
67673 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
67674 + if (IS_ERR(path_fault))
67675 + path_fault = "<path too long>";
67676 + else {
67677 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
67678 + if (path_fault) {
67679 + *path_fault = 0;
67680 + path_fault = buffer_fault;
67681 + } else
67682 + path_fault = "<path too long>";
67683 + }
67684 + } else
67685 + path_fault = "<anonymous mapping>";
67686 + }
67687 + up_read(&mm->mmap_sem);
67688 + }
67689 + if (tsk->signal->curr_ip)
67690 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
67691 + else
67692 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
67693 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
67694 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
67695 + task_uid(tsk), task_euid(tsk), pc, sp);
67696 + free_page((unsigned long)buffer_exec);
67697 + free_page((unsigned long)buffer_fault);
67698 + pax_report_insns(regs, pc, sp);
67699 + do_coredump(SIGKILL, SIGKILL, regs);
67700 +}
67701 +#endif
67702 +
67703 +#ifdef CONFIG_PAX_REFCOUNT
67704 +void pax_report_refcount_overflow(struct pt_regs *regs)
67705 +{
67706 + if (current->signal->curr_ip)
67707 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
67708 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
67709 + else
67710 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
67711 + current->comm, task_pid_nr(current), current_uid(), current_euid());
67712 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
67713 + show_regs(regs);
67714 + force_sig_specific(SIGKILL, current);
67715 +}
67716 +#endif
67717 +
67718 +#ifdef CONFIG_PAX_USERCOPY
67719 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
67720 +int object_is_on_stack(const void *obj, unsigned long len)
67721 +{
67722 + const void * const stack = task_stack_page(current);
67723 + const void * const stackend = stack + THREAD_SIZE;
67724 +
67725 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
67726 + const void *frame = NULL;
67727 + const void *oldframe;
67728 +#endif
67729 +
67730 + if (obj + len < obj)
67731 + return -1;
67732 +
67733 + if (obj + len <= stack || stackend <= obj)
67734 + return 0;
67735 +
67736 + if (obj < stack || stackend < obj + len)
67737 + return -1;
67738 +
67739 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
67740 + oldframe = __builtin_frame_address(1);
67741 + if (oldframe)
67742 + frame = __builtin_frame_address(2);
67743 + /*
67744 + low ----------------------------------------------> high
67745 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
67746 + ^----------------^
67747 + allow copies only within here
67748 + */
67749 + while (stack <= frame && frame < stackend) {
67750 + /* if obj + len extends past the last frame, this
67751 + check won't pass and the next frame will be 0,
67752 + causing us to bail out and correctly report
67753 + the copy as invalid
67754 + */
67755 + if (obj + len <= frame)
67756 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
67757 + oldframe = frame;
67758 + frame = *(const void * const *)frame;
67759 + }
67760 + return -1;
67761 +#else
67762 + return 1;
67763 +#endif
67764 +}
67765 +
67766 +__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
67767 +{
67768 + if (current->signal->curr_ip)
67769 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
67770 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
67771 + else
67772 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
67773 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
67774 +
67775 + dump_stack();
67776 + gr_handle_kernel_exploit();
67777 + do_group_exit(SIGKILL);
67778 +}
67779 +#endif
67780 +
67781 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
67782 +void pax_track_stack(void)
67783 +{
67784 + unsigned long sp = (unsigned long)&sp;
67785 + if (sp < current_thread_info()->lowest_stack &&
67786 + sp > (unsigned long)task_stack_page(current))
67787 + current_thread_info()->lowest_stack = sp;
67788 +}
67789 +EXPORT_SYMBOL(pax_track_stack);
67790 +#endif
67791 +
67792 +#ifdef CONFIG_PAX_SIZE_OVERFLOW
67793 +void report_size_overflow(const char *file, unsigned int line, const char *func)
67794 +{
67795 + printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
67796 + dump_stack();
67797 + do_group_exit(SIGKILL);
67798 +}
67799 +EXPORT_SYMBOL(report_size_overflow);
67800 +#endif
67801 +
67802 static int zap_process(struct task_struct *start)
67803 {
67804 struct task_struct *t;
67805 @@ -1793,17 +2164,17 @@ static void wait_for_dump_helpers(struct file *file)
67806 pipe = file->f_path.dentry->d_inode->i_pipe;
67807
67808 pipe_lock(pipe);
67809 - pipe->readers++;
67810 - pipe->writers--;
67811 + atomic_inc(&pipe->readers);
67812 + atomic_dec(&pipe->writers);
67813
67814 - while ((pipe->readers > 1) && (!signal_pending(current))) {
67815 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
67816 wake_up_interruptible_sync(&pipe->wait);
67817 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
67818 pipe_wait(pipe);
67819 }
67820
67821 - pipe->readers--;
67822 - pipe->writers++;
67823 + atomic_dec(&pipe->readers);
67824 + atomic_inc(&pipe->writers);
67825 pipe_unlock(pipe);
67826
67827 }
67828 @@ -1826,10 +2197,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
67829 char **helper_argv = NULL;
67830 int helper_argc = 0;
67831 int dump_count = 0;
67832 - static atomic_t core_dump_count = ATOMIC_INIT(0);
67833 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
67834
67835 audit_core_dumps(signr);
67836
67837 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
67838 + gr_handle_brute_attach(current, mm->flags);
67839 +
67840 binfmt = mm->binfmt;
67841 if (!binfmt || !binfmt->core_dump)
67842 goto fail;
67843 @@ -1874,6 +2248,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
67844 */
67845 clear_thread_flag(TIF_SIGPENDING);
67846
67847 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
67848 +
67849 /*
67850 * lock_kernel() because format_corename() is controlled by sysctl, which
67851 * uses lock_kernel()
67852 @@ -1908,7 +2284,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
67853 goto fail_unlock;
67854 }
67855
67856 - dump_count = atomic_inc_return(&core_dump_count);
67857 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
67858 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
67859 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
67860 task_tgid_vnr(current), current->comm);
67861 @@ -1972,7 +2348,7 @@ close_fail:
67862 filp_close(file, NULL);
67863 fail_dropcount:
67864 if (dump_count)
67865 - atomic_dec(&core_dump_count);
67866 + atomic_dec_unchecked(&core_dump_count);
67867 fail_unlock:
67868 if (helper_argv)
67869 argv_free(helper_argv);
67870 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
67871 index 7f8d2e5..a1abdbb 100644
67872 --- a/fs/ext2/balloc.c
67873 +++ b/fs/ext2/balloc.c
67874 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
67875
67876 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
67877 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
67878 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
67879 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
67880 sbi->s_resuid != current_fsuid() &&
67881 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
67882 return 0;
67883 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
67884 index 27967f9..9f2a5fb 100644
67885 --- a/fs/ext3/balloc.c
67886 +++ b/fs/ext3/balloc.c
67887 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
67888
67889 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
67890 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
67891 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
67892 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
67893 sbi->s_resuid != current_fsuid() &&
67894 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
67895 return 0;
67896 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
67897 index e85b63c..80398e6 100644
67898 --- a/fs/ext4/balloc.c
67899 +++ b/fs/ext4/balloc.c
67900 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
67901 /* Hm, nope. Are (enough) root reserved blocks available? */
67902 if (sbi->s_resuid == current_fsuid() ||
67903 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
67904 - capable(CAP_SYS_RESOURCE)) {
67905 + capable_nolog(CAP_SYS_RESOURCE)) {
67906 if (free_blocks >= (nblocks + dirty_blocks))
67907 return 1;
67908 }
67909 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
67910 index 67c46ed..1f237e5 100644
67911 --- a/fs/ext4/ext4.h
67912 +++ b/fs/ext4/ext4.h
67913 @@ -1077,19 +1077,19 @@ struct ext4_sb_info {
67914
67915 /* stats for buddy allocator */
67916 spinlock_t s_mb_pa_lock;
67917 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
67918 - atomic_t s_bal_success; /* we found long enough chunks */
67919 - atomic_t s_bal_allocated; /* in blocks */
67920 - atomic_t s_bal_ex_scanned; /* total extents scanned */
67921 - atomic_t s_bal_goals; /* goal hits */
67922 - atomic_t s_bal_breaks; /* too long searches */
67923 - atomic_t s_bal_2orders; /* 2^order hits */
67924 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
67925 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
67926 + atomic_unchecked_t s_bal_allocated; /* in blocks */
67927 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
67928 + atomic_unchecked_t s_bal_goals; /* goal hits */
67929 + atomic_unchecked_t s_bal_breaks; /* too long searches */
67930 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
67931 spinlock_t s_bal_lock;
67932 unsigned long s_mb_buddies_generated;
67933 unsigned long long s_mb_generation_time;
67934 - atomic_t s_mb_lost_chunks;
67935 - atomic_t s_mb_preallocated;
67936 - atomic_t s_mb_discarded;
67937 + atomic_unchecked_t s_mb_lost_chunks;
67938 + atomic_unchecked_t s_mb_preallocated;
67939 + atomic_unchecked_t s_mb_discarded;
67940 atomic_t s_lock_busy;
67941
67942 /* locality groups */
67943 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
67944 index 2a60541..7439d61 100644
67945 --- a/fs/ext4/file.c
67946 +++ b/fs/ext4/file.c
67947 @@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
67948 cp = d_path(&path, buf, sizeof(buf));
67949 path_put(&path);
67950 if (!IS_ERR(cp)) {
67951 - memcpy(sbi->s_es->s_last_mounted, cp,
67952 - sizeof(sbi->s_es->s_last_mounted));
67953 + strlcpy(sbi->s_es->s_last_mounted, cp,
67954 + sizeof(sbi->s_es->s_last_mounted));
67955 sb->s_dirt = 1;
67956 }
67957 }
67958 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
67959 index 42bac1b..0aab9d8 100644
67960 --- a/fs/ext4/mballoc.c
67961 +++ b/fs/ext4/mballoc.c
67962 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
67963 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
67964
67965 if (EXT4_SB(sb)->s_mb_stats)
67966 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
67967 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
67968
67969 break;
67970 }
67971 @@ -2131,7 +2131,7 @@ repeat:
67972 ac->ac_status = AC_STATUS_CONTINUE;
67973 ac->ac_flags |= EXT4_MB_HINT_FIRST;
67974 cr = 3;
67975 - atomic_inc(&sbi->s_mb_lost_chunks);
67976 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
67977 goto repeat;
67978 }
67979 }
67980 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
67981 ext4_grpblk_t counters[16];
67982 } sg;
67983
67984 + pax_track_stack();
67985 +
67986 group--;
67987 if (group == 0)
67988 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
67989 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
67990 if (sbi->s_mb_stats) {
67991 printk(KERN_INFO
67992 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
67993 - atomic_read(&sbi->s_bal_allocated),
67994 - atomic_read(&sbi->s_bal_reqs),
67995 - atomic_read(&sbi->s_bal_success));
67996 + atomic_read_unchecked(&sbi->s_bal_allocated),
67997 + atomic_read_unchecked(&sbi->s_bal_reqs),
67998 + atomic_read_unchecked(&sbi->s_bal_success));
67999 printk(KERN_INFO
68000 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
68001 "%u 2^N hits, %u breaks, %u lost\n",
68002 - atomic_read(&sbi->s_bal_ex_scanned),
68003 - atomic_read(&sbi->s_bal_goals),
68004 - atomic_read(&sbi->s_bal_2orders),
68005 - atomic_read(&sbi->s_bal_breaks),
68006 - atomic_read(&sbi->s_mb_lost_chunks));
68007 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
68008 + atomic_read_unchecked(&sbi->s_bal_goals),
68009 + atomic_read_unchecked(&sbi->s_bal_2orders),
68010 + atomic_read_unchecked(&sbi->s_bal_breaks),
68011 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
68012 printk(KERN_INFO
68013 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
68014 sbi->s_mb_buddies_generated++,
68015 sbi->s_mb_generation_time);
68016 printk(KERN_INFO
68017 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
68018 - atomic_read(&sbi->s_mb_preallocated),
68019 - atomic_read(&sbi->s_mb_discarded));
68020 + atomic_read_unchecked(&sbi->s_mb_preallocated),
68021 + atomic_read_unchecked(&sbi->s_mb_discarded));
68022 }
68023
68024 free_percpu(sbi->s_locality_groups);
68025 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
68026 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
68027
68028 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
68029 - atomic_inc(&sbi->s_bal_reqs);
68030 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
68031 + atomic_inc_unchecked(&sbi->s_bal_reqs);
68032 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
68033 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
68034 - atomic_inc(&sbi->s_bal_success);
68035 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
68036 + atomic_inc_unchecked(&sbi->s_bal_success);
68037 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
68038 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
68039 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
68040 - atomic_inc(&sbi->s_bal_goals);
68041 + atomic_inc_unchecked(&sbi->s_bal_goals);
68042 if (ac->ac_found > sbi->s_mb_max_to_scan)
68043 - atomic_inc(&sbi->s_bal_breaks);
68044 + atomic_inc_unchecked(&sbi->s_bal_breaks);
68045 }
68046
68047 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
68048 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
68049 trace_ext4_mb_new_inode_pa(ac, pa);
68050
68051 ext4_mb_use_inode_pa(ac, pa);
68052 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
68053 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
68054
68055 ei = EXT4_I(ac->ac_inode);
68056 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
68057 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
68058 trace_ext4_mb_new_group_pa(ac, pa);
68059
68060 ext4_mb_use_group_pa(ac, pa);
68061 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
68062 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
68063
68064 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
68065 lg = ac->ac_lg;
68066 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
68067 * from the bitmap and continue.
68068 */
68069 }
68070 - atomic_add(free, &sbi->s_mb_discarded);
68071 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
68072
68073 return err;
68074 }
68075 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
68076 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
68077 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
68078 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
68079 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
68080 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
68081
68082 if (ac) {
68083 ac->ac_sb = sb;
68084 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
68085 index f1e7077..edd86b2 100644
68086 --- a/fs/ext4/super.c
68087 +++ b/fs/ext4/super.c
68088 @@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj)
68089 }
68090
68091
68092 -static struct sysfs_ops ext4_attr_ops = {
68093 +static const struct sysfs_ops ext4_attr_ops = {
68094 .show = ext4_attr_show,
68095 .store = ext4_attr_store,
68096 };
68097 diff --git a/fs/fcntl.c b/fs/fcntl.c
68098 index 97e01dc..e9aab2d 100644
68099 --- a/fs/fcntl.c
68100 +++ b/fs/fcntl.c
68101 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
68102 if (err)
68103 return err;
68104
68105 + if (gr_handle_chroot_fowner(pid, type))
68106 + return -ENOENT;
68107 + if (gr_check_protected_task_fowner(pid, type))
68108 + return -EACCES;
68109 +
68110 f_modown(filp, pid, type, force);
68111 return 0;
68112 }
68113 @@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
68114
68115 static int f_setown_ex(struct file *filp, unsigned long arg)
68116 {
68117 - struct f_owner_ex * __user owner_p = (void * __user)arg;
68118 + struct f_owner_ex __user *owner_p = (void __user *)arg;
68119 struct f_owner_ex owner;
68120 struct pid *pid;
68121 int type;
68122 @@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
68123
68124 static int f_getown_ex(struct file *filp, unsigned long arg)
68125 {
68126 - struct f_owner_ex * __user owner_p = (void * __user)arg;
68127 + struct f_owner_ex __user *owner_p = (void __user *)arg;
68128 struct f_owner_ex owner;
68129 int ret = 0;
68130
68131 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
68132 switch (cmd) {
68133 case F_DUPFD:
68134 case F_DUPFD_CLOEXEC:
68135 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
68136 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
68137 break;
68138 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
68139 diff --git a/fs/fifo.c b/fs/fifo.c
68140 index f8f97b8..b1f2259 100644
68141 --- a/fs/fifo.c
68142 +++ b/fs/fifo.c
68143 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
68144 */
68145 filp->f_op = &read_pipefifo_fops;
68146 pipe->r_counter++;
68147 - if (pipe->readers++ == 0)
68148 + if (atomic_inc_return(&pipe->readers) == 1)
68149 wake_up_partner(inode);
68150
68151 - if (!pipe->writers) {
68152 + if (!atomic_read(&pipe->writers)) {
68153 if ((filp->f_flags & O_NONBLOCK)) {
68154 /* suppress POLLHUP until we have
68155 * seen a writer */
68156 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
68157 * errno=ENXIO when there is no process reading the FIFO.
68158 */
68159 ret = -ENXIO;
68160 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
68161 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
68162 goto err;
68163
68164 filp->f_op = &write_pipefifo_fops;
68165 pipe->w_counter++;
68166 - if (!pipe->writers++)
68167 + if (atomic_inc_return(&pipe->writers) == 1)
68168 wake_up_partner(inode);
68169
68170 - if (!pipe->readers) {
68171 + if (!atomic_read(&pipe->readers)) {
68172 wait_for_partner(inode, &pipe->r_counter);
68173 if (signal_pending(current))
68174 goto err_wr;
68175 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
68176 */
68177 filp->f_op = &rdwr_pipefifo_fops;
68178
68179 - pipe->readers++;
68180 - pipe->writers++;
68181 + atomic_inc(&pipe->readers);
68182 + atomic_inc(&pipe->writers);
68183 pipe->r_counter++;
68184 pipe->w_counter++;
68185 - if (pipe->readers == 1 || pipe->writers == 1)
68186 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
68187 wake_up_partner(inode);
68188 break;
68189
68190 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
68191 return 0;
68192
68193 err_rd:
68194 - if (!--pipe->readers)
68195 + if (atomic_dec_and_test(&pipe->readers))
68196 wake_up_interruptible(&pipe->wait);
68197 ret = -ERESTARTSYS;
68198 goto err;
68199
68200 err_wr:
68201 - if (!--pipe->writers)
68202 + if (atomic_dec_and_test(&pipe->writers))
68203 wake_up_interruptible(&pipe->wait);
68204 ret = -ERESTARTSYS;
68205 goto err;
68206
68207 err:
68208 - if (!pipe->readers && !pipe->writers)
68209 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
68210 free_pipe_info(inode);
68211
68212 err_nocleanup:
68213 diff --git a/fs/file.c b/fs/file.c
68214 index 87e1290..a930cc4 100644
68215 --- a/fs/file.c
68216 +++ b/fs/file.c
68217 @@ -14,6 +14,7 @@
68218 #include <linux/slab.h>
68219 #include <linux/vmalloc.h>
68220 #include <linux/file.h>
68221 +#include <linux/security.h>
68222 #include <linux/fdtable.h>
68223 #include <linux/bitops.h>
68224 #include <linux/interrupt.h>
68225 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
68226 * N.B. For clone tasks sharing a files structure, this test
68227 * will limit the total number of files that can be opened.
68228 */
68229 +
68230 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
68231 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
68232 return -EMFILE;
68233
68234 diff --git a/fs/filesystems.c b/fs/filesystems.c
68235 index a24c58e..53f91ee 100644
68236 --- a/fs/filesystems.c
68237 +++ b/fs/filesystems.c
68238 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
68239 int len = dot ? dot - name : strlen(name);
68240
68241 fs = __get_fs_type(name, len);
68242 +
68243 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
68244 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
68245 +#else
68246 if (!fs && (request_module("%.*s", len, name) == 0))
68247 +#endif
68248 fs = __get_fs_type(name, len);
68249
68250 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
68251 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
68252 index eee0590..1181166 100644
68253 --- a/fs/fs_struct.c
68254 +++ b/fs/fs_struct.c
68255 @@ -4,6 +4,7 @@
68256 #include <linux/path.h>
68257 #include <linux/slab.h>
68258 #include <linux/fs_struct.h>
68259 +#include <linux/grsecurity.h>
68260
68261 /*
68262 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
68263 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
68264 old_root = fs->root;
68265 fs->root = *path;
68266 path_get(path);
68267 + gr_set_chroot_entries(current, path);
68268 write_unlock(&fs->lock);
68269 if (old_root.dentry)
68270 path_put(&old_root);
68271 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
68272 && fs->root.mnt == old_root->mnt) {
68273 path_get(new_root);
68274 fs->root = *new_root;
68275 + gr_set_chroot_entries(p, new_root);
68276 count++;
68277 }
68278 if (fs->pwd.dentry == old_root->dentry
68279 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
68280 task_lock(tsk);
68281 write_lock(&fs->lock);
68282 tsk->fs = NULL;
68283 - kill = !--fs->users;
68284 + gr_clear_chroot_entries(tsk);
68285 + kill = !atomic_dec_return(&fs->users);
68286 write_unlock(&fs->lock);
68287 task_unlock(tsk);
68288 if (kill)
68289 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
68290 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
68291 /* We don't need to lock fs - think why ;-) */
68292 if (fs) {
68293 - fs->users = 1;
68294 + atomic_set(&fs->users, 1);
68295 fs->in_exec = 0;
68296 rwlock_init(&fs->lock);
68297 fs->umask = old->umask;
68298 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
68299
68300 task_lock(current);
68301 write_lock(&fs->lock);
68302 - kill = !--fs->users;
68303 + kill = !atomic_dec_return(&fs->users);
68304 current->fs = new_fs;
68305 + gr_set_chroot_entries(current, &new_fs->root);
68306 write_unlock(&fs->lock);
68307 task_unlock(current);
68308
68309 @@ -141,13 +146,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
68310
68311 int current_umask(void)
68312 {
68313 - return current->fs->umask;
68314 + return current->fs->umask | gr_acl_umask();
68315 }
68316 EXPORT_SYMBOL(current_umask);
68317
68318 /* to be mentioned only in INIT_TASK */
68319 struct fs_struct init_fs = {
68320 - .users = 1,
68321 + .users = ATOMIC_INIT(1),
68322 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
68323 .umask = 0022,
68324 };
68325 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
68326 task_lock(current);
68327
68328 write_lock(&init_fs.lock);
68329 - init_fs.users++;
68330 + atomic_inc(&init_fs.users);
68331 write_unlock(&init_fs.lock);
68332
68333 write_lock(&fs->lock);
68334 current->fs = &init_fs;
68335 - kill = !--fs->users;
68336 + gr_set_chroot_entries(current, &current->fs->root);
68337 + kill = !atomic_dec_return(&fs->users);
68338 write_unlock(&fs->lock);
68339
68340 task_unlock(current);
68341 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
68342 index 9905350..02eaec4 100644
68343 --- a/fs/fscache/cookie.c
68344 +++ b/fs/fscache/cookie.c
68345 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
68346 parent ? (char *) parent->def->name : "<no-parent>",
68347 def->name, netfs_data);
68348
68349 - fscache_stat(&fscache_n_acquires);
68350 + fscache_stat_unchecked(&fscache_n_acquires);
68351
68352 /* if there's no parent cookie, then we don't create one here either */
68353 if (!parent) {
68354 - fscache_stat(&fscache_n_acquires_null);
68355 + fscache_stat_unchecked(&fscache_n_acquires_null);
68356 _leave(" [no parent]");
68357 return NULL;
68358 }
68359 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
68360 /* allocate and initialise a cookie */
68361 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
68362 if (!cookie) {
68363 - fscache_stat(&fscache_n_acquires_oom);
68364 + fscache_stat_unchecked(&fscache_n_acquires_oom);
68365 _leave(" [ENOMEM]");
68366 return NULL;
68367 }
68368 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
68369
68370 switch (cookie->def->type) {
68371 case FSCACHE_COOKIE_TYPE_INDEX:
68372 - fscache_stat(&fscache_n_cookie_index);
68373 + fscache_stat_unchecked(&fscache_n_cookie_index);
68374 break;
68375 case FSCACHE_COOKIE_TYPE_DATAFILE:
68376 - fscache_stat(&fscache_n_cookie_data);
68377 + fscache_stat_unchecked(&fscache_n_cookie_data);
68378 break;
68379 default:
68380 - fscache_stat(&fscache_n_cookie_special);
68381 + fscache_stat_unchecked(&fscache_n_cookie_special);
68382 break;
68383 }
68384
68385 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
68386 if (fscache_acquire_non_index_cookie(cookie) < 0) {
68387 atomic_dec(&parent->n_children);
68388 __fscache_cookie_put(cookie);
68389 - fscache_stat(&fscache_n_acquires_nobufs);
68390 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
68391 _leave(" = NULL");
68392 return NULL;
68393 }
68394 }
68395
68396 - fscache_stat(&fscache_n_acquires_ok);
68397 + fscache_stat_unchecked(&fscache_n_acquires_ok);
68398 _leave(" = %p", cookie);
68399 return cookie;
68400 }
68401 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
68402 cache = fscache_select_cache_for_object(cookie->parent);
68403 if (!cache) {
68404 up_read(&fscache_addremove_sem);
68405 - fscache_stat(&fscache_n_acquires_no_cache);
68406 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
68407 _leave(" = -ENOMEDIUM [no cache]");
68408 return -ENOMEDIUM;
68409 }
68410 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
68411 object = cache->ops->alloc_object(cache, cookie);
68412 fscache_stat_d(&fscache_n_cop_alloc_object);
68413 if (IS_ERR(object)) {
68414 - fscache_stat(&fscache_n_object_no_alloc);
68415 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
68416 ret = PTR_ERR(object);
68417 goto error;
68418 }
68419
68420 - fscache_stat(&fscache_n_object_alloc);
68421 + fscache_stat_unchecked(&fscache_n_object_alloc);
68422
68423 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
68424
68425 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
68426 struct fscache_object *object;
68427 struct hlist_node *_p;
68428
68429 - fscache_stat(&fscache_n_updates);
68430 + fscache_stat_unchecked(&fscache_n_updates);
68431
68432 if (!cookie) {
68433 - fscache_stat(&fscache_n_updates_null);
68434 + fscache_stat_unchecked(&fscache_n_updates_null);
68435 _leave(" [no cookie]");
68436 return;
68437 }
68438 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
68439 struct fscache_object *object;
68440 unsigned long event;
68441
68442 - fscache_stat(&fscache_n_relinquishes);
68443 + fscache_stat_unchecked(&fscache_n_relinquishes);
68444 if (retire)
68445 - fscache_stat(&fscache_n_relinquishes_retire);
68446 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
68447
68448 if (!cookie) {
68449 - fscache_stat(&fscache_n_relinquishes_null);
68450 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
68451 _leave(" [no cookie]");
68452 return;
68453 }
68454 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
68455
68456 /* wait for the cookie to finish being instantiated (or to fail) */
68457 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
68458 - fscache_stat(&fscache_n_relinquishes_waitcrt);
68459 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
68460 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
68461 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
68462 }
68463 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
68464 index edd7434..0725e66 100644
68465 --- a/fs/fscache/internal.h
68466 +++ b/fs/fscache/internal.h
68467 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
68468 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
68469 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
68470
68471 -extern atomic_t fscache_n_op_pend;
68472 -extern atomic_t fscache_n_op_run;
68473 -extern atomic_t fscache_n_op_enqueue;
68474 -extern atomic_t fscache_n_op_deferred_release;
68475 -extern atomic_t fscache_n_op_release;
68476 -extern atomic_t fscache_n_op_gc;
68477 -extern atomic_t fscache_n_op_cancelled;
68478 -extern atomic_t fscache_n_op_rejected;
68479 +extern atomic_unchecked_t fscache_n_op_pend;
68480 +extern atomic_unchecked_t fscache_n_op_run;
68481 +extern atomic_unchecked_t fscache_n_op_enqueue;
68482 +extern atomic_unchecked_t fscache_n_op_deferred_release;
68483 +extern atomic_unchecked_t fscache_n_op_release;
68484 +extern atomic_unchecked_t fscache_n_op_gc;
68485 +extern atomic_unchecked_t fscache_n_op_cancelled;
68486 +extern atomic_unchecked_t fscache_n_op_rejected;
68487
68488 -extern atomic_t fscache_n_attr_changed;
68489 -extern atomic_t fscache_n_attr_changed_ok;
68490 -extern atomic_t fscache_n_attr_changed_nobufs;
68491 -extern atomic_t fscache_n_attr_changed_nomem;
68492 -extern atomic_t fscache_n_attr_changed_calls;
68493 +extern atomic_unchecked_t fscache_n_attr_changed;
68494 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
68495 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
68496 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
68497 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
68498
68499 -extern atomic_t fscache_n_allocs;
68500 -extern atomic_t fscache_n_allocs_ok;
68501 -extern atomic_t fscache_n_allocs_wait;
68502 -extern atomic_t fscache_n_allocs_nobufs;
68503 -extern atomic_t fscache_n_allocs_intr;
68504 -extern atomic_t fscache_n_allocs_object_dead;
68505 -extern atomic_t fscache_n_alloc_ops;
68506 -extern atomic_t fscache_n_alloc_op_waits;
68507 +extern atomic_unchecked_t fscache_n_allocs;
68508 +extern atomic_unchecked_t fscache_n_allocs_ok;
68509 +extern atomic_unchecked_t fscache_n_allocs_wait;
68510 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
68511 +extern atomic_unchecked_t fscache_n_allocs_intr;
68512 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
68513 +extern atomic_unchecked_t fscache_n_alloc_ops;
68514 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
68515
68516 -extern atomic_t fscache_n_retrievals;
68517 -extern atomic_t fscache_n_retrievals_ok;
68518 -extern atomic_t fscache_n_retrievals_wait;
68519 -extern atomic_t fscache_n_retrievals_nodata;
68520 -extern atomic_t fscache_n_retrievals_nobufs;
68521 -extern atomic_t fscache_n_retrievals_intr;
68522 -extern atomic_t fscache_n_retrievals_nomem;
68523 -extern atomic_t fscache_n_retrievals_object_dead;
68524 -extern atomic_t fscache_n_retrieval_ops;
68525 -extern atomic_t fscache_n_retrieval_op_waits;
68526 +extern atomic_unchecked_t fscache_n_retrievals;
68527 +extern atomic_unchecked_t fscache_n_retrievals_ok;
68528 +extern atomic_unchecked_t fscache_n_retrievals_wait;
68529 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
68530 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
68531 +extern atomic_unchecked_t fscache_n_retrievals_intr;
68532 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
68533 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
68534 +extern atomic_unchecked_t fscache_n_retrieval_ops;
68535 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
68536
68537 -extern atomic_t fscache_n_stores;
68538 -extern atomic_t fscache_n_stores_ok;
68539 -extern atomic_t fscache_n_stores_again;
68540 -extern atomic_t fscache_n_stores_nobufs;
68541 -extern atomic_t fscache_n_stores_oom;
68542 -extern atomic_t fscache_n_store_ops;
68543 -extern atomic_t fscache_n_store_calls;
68544 -extern atomic_t fscache_n_store_pages;
68545 -extern atomic_t fscache_n_store_radix_deletes;
68546 -extern atomic_t fscache_n_store_pages_over_limit;
68547 +extern atomic_unchecked_t fscache_n_stores;
68548 +extern atomic_unchecked_t fscache_n_stores_ok;
68549 +extern atomic_unchecked_t fscache_n_stores_again;
68550 +extern atomic_unchecked_t fscache_n_stores_nobufs;
68551 +extern atomic_unchecked_t fscache_n_stores_oom;
68552 +extern atomic_unchecked_t fscache_n_store_ops;
68553 +extern atomic_unchecked_t fscache_n_store_calls;
68554 +extern atomic_unchecked_t fscache_n_store_pages;
68555 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
68556 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
68557
68558 -extern atomic_t fscache_n_store_vmscan_not_storing;
68559 -extern atomic_t fscache_n_store_vmscan_gone;
68560 -extern atomic_t fscache_n_store_vmscan_busy;
68561 -extern atomic_t fscache_n_store_vmscan_cancelled;
68562 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
68563 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
68564 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
68565 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
68566
68567 -extern atomic_t fscache_n_marks;
68568 -extern atomic_t fscache_n_uncaches;
68569 +extern atomic_unchecked_t fscache_n_marks;
68570 +extern atomic_unchecked_t fscache_n_uncaches;
68571
68572 -extern atomic_t fscache_n_acquires;
68573 -extern atomic_t fscache_n_acquires_null;
68574 -extern atomic_t fscache_n_acquires_no_cache;
68575 -extern atomic_t fscache_n_acquires_ok;
68576 -extern atomic_t fscache_n_acquires_nobufs;
68577 -extern atomic_t fscache_n_acquires_oom;
68578 +extern atomic_unchecked_t fscache_n_acquires;
68579 +extern atomic_unchecked_t fscache_n_acquires_null;
68580 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
68581 +extern atomic_unchecked_t fscache_n_acquires_ok;
68582 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
68583 +extern atomic_unchecked_t fscache_n_acquires_oom;
68584
68585 -extern atomic_t fscache_n_updates;
68586 -extern atomic_t fscache_n_updates_null;
68587 -extern atomic_t fscache_n_updates_run;
68588 +extern atomic_unchecked_t fscache_n_updates;
68589 +extern atomic_unchecked_t fscache_n_updates_null;
68590 +extern atomic_unchecked_t fscache_n_updates_run;
68591
68592 -extern atomic_t fscache_n_relinquishes;
68593 -extern atomic_t fscache_n_relinquishes_null;
68594 -extern atomic_t fscache_n_relinquishes_waitcrt;
68595 -extern atomic_t fscache_n_relinquishes_retire;
68596 +extern atomic_unchecked_t fscache_n_relinquishes;
68597 +extern atomic_unchecked_t fscache_n_relinquishes_null;
68598 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
68599 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
68600
68601 -extern atomic_t fscache_n_cookie_index;
68602 -extern atomic_t fscache_n_cookie_data;
68603 -extern atomic_t fscache_n_cookie_special;
68604 +extern atomic_unchecked_t fscache_n_cookie_index;
68605 +extern atomic_unchecked_t fscache_n_cookie_data;
68606 +extern atomic_unchecked_t fscache_n_cookie_special;
68607
68608 -extern atomic_t fscache_n_object_alloc;
68609 -extern atomic_t fscache_n_object_no_alloc;
68610 -extern atomic_t fscache_n_object_lookups;
68611 -extern atomic_t fscache_n_object_lookups_negative;
68612 -extern atomic_t fscache_n_object_lookups_positive;
68613 -extern atomic_t fscache_n_object_lookups_timed_out;
68614 -extern atomic_t fscache_n_object_created;
68615 -extern atomic_t fscache_n_object_avail;
68616 -extern atomic_t fscache_n_object_dead;
68617 +extern atomic_unchecked_t fscache_n_object_alloc;
68618 +extern atomic_unchecked_t fscache_n_object_no_alloc;
68619 +extern atomic_unchecked_t fscache_n_object_lookups;
68620 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
68621 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
68622 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
68623 +extern atomic_unchecked_t fscache_n_object_created;
68624 +extern atomic_unchecked_t fscache_n_object_avail;
68625 +extern atomic_unchecked_t fscache_n_object_dead;
68626
68627 -extern atomic_t fscache_n_checkaux_none;
68628 -extern atomic_t fscache_n_checkaux_okay;
68629 -extern atomic_t fscache_n_checkaux_update;
68630 -extern atomic_t fscache_n_checkaux_obsolete;
68631 +extern atomic_unchecked_t fscache_n_checkaux_none;
68632 +extern atomic_unchecked_t fscache_n_checkaux_okay;
68633 +extern atomic_unchecked_t fscache_n_checkaux_update;
68634 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
68635
68636 extern atomic_t fscache_n_cop_alloc_object;
68637 extern atomic_t fscache_n_cop_lookup_object;
68638 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
68639 atomic_inc(stat);
68640 }
68641
68642 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
68643 +{
68644 + atomic_inc_unchecked(stat);
68645 +}
68646 +
68647 static inline void fscache_stat_d(atomic_t *stat)
68648 {
68649 atomic_dec(stat);
68650 @@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
68651
68652 #define __fscache_stat(stat) (NULL)
68653 #define fscache_stat(stat) do {} while (0)
68654 +#define fscache_stat_unchecked(stat) do {} while (0)
68655 #define fscache_stat_d(stat) do {} while (0)
68656 #endif
68657
68658 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
68659 index e513ac5..e888d34 100644
68660 --- a/fs/fscache/object.c
68661 +++ b/fs/fscache/object.c
68662 @@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
68663 /* update the object metadata on disk */
68664 case FSCACHE_OBJECT_UPDATING:
68665 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
68666 - fscache_stat(&fscache_n_updates_run);
68667 + fscache_stat_unchecked(&fscache_n_updates_run);
68668 fscache_stat(&fscache_n_cop_update_object);
68669 object->cache->ops->update_object(object);
68670 fscache_stat_d(&fscache_n_cop_update_object);
68671 @@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
68672 spin_lock(&object->lock);
68673 object->state = FSCACHE_OBJECT_DEAD;
68674 spin_unlock(&object->lock);
68675 - fscache_stat(&fscache_n_object_dead);
68676 + fscache_stat_unchecked(&fscache_n_object_dead);
68677 goto terminal_transit;
68678
68679 /* handle the parent cache of this object being withdrawn from
68680 @@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
68681 spin_lock(&object->lock);
68682 object->state = FSCACHE_OBJECT_DEAD;
68683 spin_unlock(&object->lock);
68684 - fscache_stat(&fscache_n_object_dead);
68685 + fscache_stat_unchecked(&fscache_n_object_dead);
68686 goto terminal_transit;
68687
68688 /* complain about the object being woken up once it is
68689 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
68690 parent->cookie->def->name, cookie->def->name,
68691 object->cache->tag->name);
68692
68693 - fscache_stat(&fscache_n_object_lookups);
68694 + fscache_stat_unchecked(&fscache_n_object_lookups);
68695 fscache_stat(&fscache_n_cop_lookup_object);
68696 ret = object->cache->ops->lookup_object(object);
68697 fscache_stat_d(&fscache_n_cop_lookup_object);
68698 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
68699 if (ret == -ETIMEDOUT) {
68700 /* probably stuck behind another object, so move this one to
68701 * the back of the queue */
68702 - fscache_stat(&fscache_n_object_lookups_timed_out);
68703 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
68704 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
68705 }
68706
68707 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
68708
68709 spin_lock(&object->lock);
68710 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
68711 - fscache_stat(&fscache_n_object_lookups_negative);
68712 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
68713
68714 /* transit here to allow write requests to begin stacking up
68715 * and read requests to begin returning ENODATA */
68716 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
68717 * result, in which case there may be data available */
68718 spin_lock(&object->lock);
68719 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
68720 - fscache_stat(&fscache_n_object_lookups_positive);
68721 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
68722
68723 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
68724
68725 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
68726 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
68727 } else {
68728 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
68729 - fscache_stat(&fscache_n_object_created);
68730 + fscache_stat_unchecked(&fscache_n_object_created);
68731
68732 object->state = FSCACHE_OBJECT_AVAILABLE;
68733 spin_unlock(&object->lock);
68734 @@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
68735 fscache_enqueue_dependents(object);
68736
68737 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
68738 - fscache_stat(&fscache_n_object_avail);
68739 + fscache_stat_unchecked(&fscache_n_object_avail);
68740
68741 _leave("");
68742 }
68743 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
68744 enum fscache_checkaux result;
68745
68746 if (!object->cookie->def->check_aux) {
68747 - fscache_stat(&fscache_n_checkaux_none);
68748 + fscache_stat_unchecked(&fscache_n_checkaux_none);
68749 return FSCACHE_CHECKAUX_OKAY;
68750 }
68751
68752 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
68753 switch (result) {
68754 /* entry okay as is */
68755 case FSCACHE_CHECKAUX_OKAY:
68756 - fscache_stat(&fscache_n_checkaux_okay);
68757 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
68758 break;
68759
68760 /* entry requires update */
68761 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
68762 - fscache_stat(&fscache_n_checkaux_update);
68763 + fscache_stat_unchecked(&fscache_n_checkaux_update);
68764 break;
68765
68766 /* entry requires deletion */
68767 case FSCACHE_CHECKAUX_OBSOLETE:
68768 - fscache_stat(&fscache_n_checkaux_obsolete);
68769 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
68770 break;
68771
68772 default:
68773 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
68774 index 313e79a..775240f 100644
68775 --- a/fs/fscache/operation.c
68776 +++ b/fs/fscache/operation.c
68777 @@ -16,7 +16,7 @@
68778 #include <linux/seq_file.h>
68779 #include "internal.h"
68780
68781 -atomic_t fscache_op_debug_id;
68782 +atomic_unchecked_t fscache_op_debug_id;
68783 EXPORT_SYMBOL(fscache_op_debug_id);
68784
68785 /**
68786 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
68787 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
68788 ASSERTCMP(atomic_read(&op->usage), >, 0);
68789
68790 - fscache_stat(&fscache_n_op_enqueue);
68791 + fscache_stat_unchecked(&fscache_n_op_enqueue);
68792 switch (op->flags & FSCACHE_OP_TYPE) {
68793 case FSCACHE_OP_FAST:
68794 _debug("queue fast");
68795 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
68796 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
68797 if (op->processor)
68798 fscache_enqueue_operation(op);
68799 - fscache_stat(&fscache_n_op_run);
68800 + fscache_stat_unchecked(&fscache_n_op_run);
68801 }
68802
68803 /*
68804 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
68805 if (object->n_ops > 0) {
68806 atomic_inc(&op->usage);
68807 list_add_tail(&op->pend_link, &object->pending_ops);
68808 - fscache_stat(&fscache_n_op_pend);
68809 + fscache_stat_unchecked(&fscache_n_op_pend);
68810 } else if (!list_empty(&object->pending_ops)) {
68811 atomic_inc(&op->usage);
68812 list_add_tail(&op->pend_link, &object->pending_ops);
68813 - fscache_stat(&fscache_n_op_pend);
68814 + fscache_stat_unchecked(&fscache_n_op_pend);
68815 fscache_start_operations(object);
68816 } else {
68817 ASSERTCMP(object->n_in_progress, ==, 0);
68818 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
68819 object->n_exclusive++; /* reads and writes must wait */
68820 atomic_inc(&op->usage);
68821 list_add_tail(&op->pend_link, &object->pending_ops);
68822 - fscache_stat(&fscache_n_op_pend);
68823 + fscache_stat_unchecked(&fscache_n_op_pend);
68824 ret = 0;
68825 } else {
68826 /* not allowed to submit ops in any other state */
68827 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
68828 if (object->n_exclusive > 0) {
68829 atomic_inc(&op->usage);
68830 list_add_tail(&op->pend_link, &object->pending_ops);
68831 - fscache_stat(&fscache_n_op_pend);
68832 + fscache_stat_unchecked(&fscache_n_op_pend);
68833 } else if (!list_empty(&object->pending_ops)) {
68834 atomic_inc(&op->usage);
68835 list_add_tail(&op->pend_link, &object->pending_ops);
68836 - fscache_stat(&fscache_n_op_pend);
68837 + fscache_stat_unchecked(&fscache_n_op_pend);
68838 fscache_start_operations(object);
68839 } else {
68840 ASSERTCMP(object->n_exclusive, ==, 0);
68841 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
68842 object->n_ops++;
68843 atomic_inc(&op->usage);
68844 list_add_tail(&op->pend_link, &object->pending_ops);
68845 - fscache_stat(&fscache_n_op_pend);
68846 + fscache_stat_unchecked(&fscache_n_op_pend);
68847 ret = 0;
68848 } else if (object->state == FSCACHE_OBJECT_DYING ||
68849 object->state == FSCACHE_OBJECT_LC_DYING ||
68850 object->state == FSCACHE_OBJECT_WITHDRAWING) {
68851 - fscache_stat(&fscache_n_op_rejected);
68852 + fscache_stat_unchecked(&fscache_n_op_rejected);
68853 ret = -ENOBUFS;
68854 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
68855 fscache_report_unexpected_submission(object, op, ostate);
68856 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
68857
68858 ret = -EBUSY;
68859 if (!list_empty(&op->pend_link)) {
68860 - fscache_stat(&fscache_n_op_cancelled);
68861 + fscache_stat_unchecked(&fscache_n_op_cancelled);
68862 list_del_init(&op->pend_link);
68863 object->n_ops--;
68864 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
68865 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
68866 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
68867 BUG();
68868
68869 - fscache_stat(&fscache_n_op_release);
68870 + fscache_stat_unchecked(&fscache_n_op_release);
68871
68872 if (op->release) {
68873 op->release(op);
68874 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
68875 * lock, and defer it otherwise */
68876 if (!spin_trylock(&object->lock)) {
68877 _debug("defer put");
68878 - fscache_stat(&fscache_n_op_deferred_release);
68879 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
68880
68881 cache = object->cache;
68882 spin_lock(&cache->op_gc_list_lock);
68883 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
68884
68885 _debug("GC DEFERRED REL OBJ%x OP%x",
68886 object->debug_id, op->debug_id);
68887 - fscache_stat(&fscache_n_op_gc);
68888 + fscache_stat_unchecked(&fscache_n_op_gc);
68889
68890 ASSERTCMP(atomic_read(&op->usage), ==, 0);
68891
68892 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
68893 index c598ea4..6aac13e 100644
68894 --- a/fs/fscache/page.c
68895 +++ b/fs/fscache/page.c
68896 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
68897 val = radix_tree_lookup(&cookie->stores, page->index);
68898 if (!val) {
68899 rcu_read_unlock();
68900 - fscache_stat(&fscache_n_store_vmscan_not_storing);
68901 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
68902 __fscache_uncache_page(cookie, page);
68903 return true;
68904 }
68905 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
68906 spin_unlock(&cookie->stores_lock);
68907
68908 if (xpage) {
68909 - fscache_stat(&fscache_n_store_vmscan_cancelled);
68910 - fscache_stat(&fscache_n_store_radix_deletes);
68911 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
68912 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
68913 ASSERTCMP(xpage, ==, page);
68914 } else {
68915 - fscache_stat(&fscache_n_store_vmscan_gone);
68916 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
68917 }
68918
68919 wake_up_bit(&cookie->flags, 0);
68920 @@ -106,7 +106,7 @@ page_busy:
68921 /* we might want to wait here, but that could deadlock the allocator as
68922 * the slow-work threads writing to the cache may all end up sleeping
68923 * on memory allocation */
68924 - fscache_stat(&fscache_n_store_vmscan_busy);
68925 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
68926 return false;
68927 }
68928 EXPORT_SYMBOL(__fscache_maybe_release_page);
68929 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
68930 FSCACHE_COOKIE_STORING_TAG);
68931 if (!radix_tree_tag_get(&cookie->stores, page->index,
68932 FSCACHE_COOKIE_PENDING_TAG)) {
68933 - fscache_stat(&fscache_n_store_radix_deletes);
68934 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
68935 xpage = radix_tree_delete(&cookie->stores, page->index);
68936 }
68937 spin_unlock(&cookie->stores_lock);
68938 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
68939
68940 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
68941
68942 - fscache_stat(&fscache_n_attr_changed_calls);
68943 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
68944
68945 if (fscache_object_is_active(object)) {
68946 fscache_set_op_state(op, "CallFS");
68947 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
68948
68949 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
68950
68951 - fscache_stat(&fscache_n_attr_changed);
68952 + fscache_stat_unchecked(&fscache_n_attr_changed);
68953
68954 op = kzalloc(sizeof(*op), GFP_KERNEL);
68955 if (!op) {
68956 - fscache_stat(&fscache_n_attr_changed_nomem);
68957 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
68958 _leave(" = -ENOMEM");
68959 return -ENOMEM;
68960 }
68961 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
68962 if (fscache_submit_exclusive_op(object, op) < 0)
68963 goto nobufs;
68964 spin_unlock(&cookie->lock);
68965 - fscache_stat(&fscache_n_attr_changed_ok);
68966 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
68967 fscache_put_operation(op);
68968 _leave(" = 0");
68969 return 0;
68970 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
68971 nobufs:
68972 spin_unlock(&cookie->lock);
68973 kfree(op);
68974 - fscache_stat(&fscache_n_attr_changed_nobufs);
68975 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
68976 _leave(" = %d", -ENOBUFS);
68977 return -ENOBUFS;
68978 }
68979 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
68980 /* allocate a retrieval operation and attempt to submit it */
68981 op = kzalloc(sizeof(*op), GFP_NOIO);
68982 if (!op) {
68983 - fscache_stat(&fscache_n_retrievals_nomem);
68984 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
68985 return NULL;
68986 }
68987
68988 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
68989 return 0;
68990 }
68991
68992 - fscache_stat(&fscache_n_retrievals_wait);
68993 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
68994
68995 jif = jiffies;
68996 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
68997 fscache_wait_bit_interruptible,
68998 TASK_INTERRUPTIBLE) != 0) {
68999 - fscache_stat(&fscache_n_retrievals_intr);
69000 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
69001 _leave(" = -ERESTARTSYS");
69002 return -ERESTARTSYS;
69003 }
69004 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
69005 */
69006 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
69007 struct fscache_retrieval *op,
69008 - atomic_t *stat_op_waits,
69009 - atomic_t *stat_object_dead)
69010 + atomic_unchecked_t *stat_op_waits,
69011 + atomic_unchecked_t *stat_object_dead)
69012 {
69013 int ret;
69014
69015 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
69016 goto check_if_dead;
69017
69018 _debug(">>> WT");
69019 - fscache_stat(stat_op_waits);
69020 + fscache_stat_unchecked(stat_op_waits);
69021 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
69022 fscache_wait_bit_interruptible,
69023 TASK_INTERRUPTIBLE) < 0) {
69024 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
69025
69026 check_if_dead:
69027 if (unlikely(fscache_object_is_dead(object))) {
69028 - fscache_stat(stat_object_dead);
69029 + fscache_stat_unchecked(stat_object_dead);
69030 return -ENOBUFS;
69031 }
69032 return 0;
69033 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
69034
69035 _enter("%p,%p,,,", cookie, page);
69036
69037 - fscache_stat(&fscache_n_retrievals);
69038 + fscache_stat_unchecked(&fscache_n_retrievals);
69039
69040 if (hlist_empty(&cookie->backing_objects))
69041 goto nobufs;
69042 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
69043 goto nobufs_unlock;
69044 spin_unlock(&cookie->lock);
69045
69046 - fscache_stat(&fscache_n_retrieval_ops);
69047 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
69048
69049 /* pin the netfs read context in case we need to do the actual netfs
69050 * read because we've encountered a cache read failure */
69051 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
69052
69053 error:
69054 if (ret == -ENOMEM)
69055 - fscache_stat(&fscache_n_retrievals_nomem);
69056 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
69057 else if (ret == -ERESTARTSYS)
69058 - fscache_stat(&fscache_n_retrievals_intr);
69059 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
69060 else if (ret == -ENODATA)
69061 - fscache_stat(&fscache_n_retrievals_nodata);
69062 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
69063 else if (ret < 0)
69064 - fscache_stat(&fscache_n_retrievals_nobufs);
69065 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
69066 else
69067 - fscache_stat(&fscache_n_retrievals_ok);
69068 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
69069
69070 fscache_put_retrieval(op);
69071 _leave(" = %d", ret);
69072 @@ -453,7 +453,7 @@ nobufs_unlock:
69073 spin_unlock(&cookie->lock);
69074 kfree(op);
69075 nobufs:
69076 - fscache_stat(&fscache_n_retrievals_nobufs);
69077 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
69078 _leave(" = -ENOBUFS");
69079 return -ENOBUFS;
69080 }
69081 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
69082
69083 _enter("%p,,%d,,,", cookie, *nr_pages);
69084
69085 - fscache_stat(&fscache_n_retrievals);
69086 + fscache_stat_unchecked(&fscache_n_retrievals);
69087
69088 if (hlist_empty(&cookie->backing_objects))
69089 goto nobufs;
69090 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
69091 goto nobufs_unlock;
69092 spin_unlock(&cookie->lock);
69093
69094 - fscache_stat(&fscache_n_retrieval_ops);
69095 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
69096
69097 /* pin the netfs read context in case we need to do the actual netfs
69098 * read because we've encountered a cache read failure */
69099 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
69100
69101 error:
69102 if (ret == -ENOMEM)
69103 - fscache_stat(&fscache_n_retrievals_nomem);
69104 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
69105 else if (ret == -ERESTARTSYS)
69106 - fscache_stat(&fscache_n_retrievals_intr);
69107 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
69108 else if (ret == -ENODATA)
69109 - fscache_stat(&fscache_n_retrievals_nodata);
69110 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
69111 else if (ret < 0)
69112 - fscache_stat(&fscache_n_retrievals_nobufs);
69113 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
69114 else
69115 - fscache_stat(&fscache_n_retrievals_ok);
69116 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
69117
69118 fscache_put_retrieval(op);
69119 _leave(" = %d", ret);
69120 @@ -570,7 +570,7 @@ nobufs_unlock:
69121 spin_unlock(&cookie->lock);
69122 kfree(op);
69123 nobufs:
69124 - fscache_stat(&fscache_n_retrievals_nobufs);
69125 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
69126 _leave(" = -ENOBUFS");
69127 return -ENOBUFS;
69128 }
69129 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
69130
69131 _enter("%p,%p,,,", cookie, page);
69132
69133 - fscache_stat(&fscache_n_allocs);
69134 + fscache_stat_unchecked(&fscache_n_allocs);
69135
69136 if (hlist_empty(&cookie->backing_objects))
69137 goto nobufs;
69138 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
69139 goto nobufs_unlock;
69140 spin_unlock(&cookie->lock);
69141
69142 - fscache_stat(&fscache_n_alloc_ops);
69143 + fscache_stat_unchecked(&fscache_n_alloc_ops);
69144
69145 ret = fscache_wait_for_retrieval_activation(
69146 object, op,
69147 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
69148
69149 error:
69150 if (ret == -ERESTARTSYS)
69151 - fscache_stat(&fscache_n_allocs_intr);
69152 + fscache_stat_unchecked(&fscache_n_allocs_intr);
69153 else if (ret < 0)
69154 - fscache_stat(&fscache_n_allocs_nobufs);
69155 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
69156 else
69157 - fscache_stat(&fscache_n_allocs_ok);
69158 + fscache_stat_unchecked(&fscache_n_allocs_ok);
69159
69160 fscache_put_retrieval(op);
69161 _leave(" = %d", ret);
69162 @@ -651,7 +651,7 @@ nobufs_unlock:
69163 spin_unlock(&cookie->lock);
69164 kfree(op);
69165 nobufs:
69166 - fscache_stat(&fscache_n_allocs_nobufs);
69167 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
69168 _leave(" = -ENOBUFS");
69169 return -ENOBUFS;
69170 }
69171 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
69172
69173 spin_lock(&cookie->stores_lock);
69174
69175 - fscache_stat(&fscache_n_store_calls);
69176 + fscache_stat_unchecked(&fscache_n_store_calls);
69177
69178 /* find a page to store */
69179 page = NULL;
69180 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
69181 page = results[0];
69182 _debug("gang %d [%lx]", n, page->index);
69183 if (page->index > op->store_limit) {
69184 - fscache_stat(&fscache_n_store_pages_over_limit);
69185 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
69186 goto superseded;
69187 }
69188
69189 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
69190
69191 if (page) {
69192 fscache_set_op_state(&op->op, "Store");
69193 - fscache_stat(&fscache_n_store_pages);
69194 + fscache_stat_unchecked(&fscache_n_store_pages);
69195 fscache_stat(&fscache_n_cop_write_page);
69196 ret = object->cache->ops->write_page(op, page);
69197 fscache_stat_d(&fscache_n_cop_write_page);
69198 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
69199 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
69200 ASSERT(PageFsCache(page));
69201
69202 - fscache_stat(&fscache_n_stores);
69203 + fscache_stat_unchecked(&fscache_n_stores);
69204
69205 op = kzalloc(sizeof(*op), GFP_NOIO);
69206 if (!op)
69207 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
69208 spin_unlock(&cookie->stores_lock);
69209 spin_unlock(&object->lock);
69210
69211 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
69212 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
69213 op->store_limit = object->store_limit;
69214
69215 if (fscache_submit_op(object, &op->op) < 0)
69216 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
69217
69218 spin_unlock(&cookie->lock);
69219 radix_tree_preload_end();
69220 - fscache_stat(&fscache_n_store_ops);
69221 - fscache_stat(&fscache_n_stores_ok);
69222 + fscache_stat_unchecked(&fscache_n_store_ops);
69223 + fscache_stat_unchecked(&fscache_n_stores_ok);
69224
69225 /* the slow work queue now carries its own ref on the object */
69226 fscache_put_operation(&op->op);
69227 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
69228 return 0;
69229
69230 already_queued:
69231 - fscache_stat(&fscache_n_stores_again);
69232 + fscache_stat_unchecked(&fscache_n_stores_again);
69233 already_pending:
69234 spin_unlock(&cookie->stores_lock);
69235 spin_unlock(&object->lock);
69236 spin_unlock(&cookie->lock);
69237 radix_tree_preload_end();
69238 kfree(op);
69239 - fscache_stat(&fscache_n_stores_ok);
69240 + fscache_stat_unchecked(&fscache_n_stores_ok);
69241 _leave(" = 0");
69242 return 0;
69243
69244 @@ -886,14 +886,14 @@ nobufs:
69245 spin_unlock(&cookie->lock);
69246 radix_tree_preload_end();
69247 kfree(op);
69248 - fscache_stat(&fscache_n_stores_nobufs);
69249 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
69250 _leave(" = -ENOBUFS");
69251 return -ENOBUFS;
69252
69253 nomem_free:
69254 kfree(op);
69255 nomem:
69256 - fscache_stat(&fscache_n_stores_oom);
69257 + fscache_stat_unchecked(&fscache_n_stores_oom);
69258 _leave(" = -ENOMEM");
69259 return -ENOMEM;
69260 }
69261 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
69262 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
69263 ASSERTCMP(page, !=, NULL);
69264
69265 - fscache_stat(&fscache_n_uncaches);
69266 + fscache_stat_unchecked(&fscache_n_uncaches);
69267
69268 /* cache withdrawal may beat us to it */
69269 if (!PageFsCache(page))
69270 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
69271 unsigned long loop;
69272
69273 #ifdef CONFIG_FSCACHE_STATS
69274 - atomic_add(pagevec->nr, &fscache_n_marks);
69275 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
69276 #endif
69277
69278 for (loop = 0; loop < pagevec->nr; loop++) {
69279 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
69280 index 46435f3..8cddf18 100644
69281 --- a/fs/fscache/stats.c
69282 +++ b/fs/fscache/stats.c
69283 @@ -18,95 +18,95 @@
69284 /*
69285 * operation counters
69286 */
69287 -atomic_t fscache_n_op_pend;
69288 -atomic_t fscache_n_op_run;
69289 -atomic_t fscache_n_op_enqueue;
69290 -atomic_t fscache_n_op_requeue;
69291 -atomic_t fscache_n_op_deferred_release;
69292 -atomic_t fscache_n_op_release;
69293 -atomic_t fscache_n_op_gc;
69294 -atomic_t fscache_n_op_cancelled;
69295 -atomic_t fscache_n_op_rejected;
69296 +atomic_unchecked_t fscache_n_op_pend;
69297 +atomic_unchecked_t fscache_n_op_run;
69298 +atomic_unchecked_t fscache_n_op_enqueue;
69299 +atomic_unchecked_t fscache_n_op_requeue;
69300 +atomic_unchecked_t fscache_n_op_deferred_release;
69301 +atomic_unchecked_t fscache_n_op_release;
69302 +atomic_unchecked_t fscache_n_op_gc;
69303 +atomic_unchecked_t fscache_n_op_cancelled;
69304 +atomic_unchecked_t fscache_n_op_rejected;
69305
69306 -atomic_t fscache_n_attr_changed;
69307 -atomic_t fscache_n_attr_changed_ok;
69308 -atomic_t fscache_n_attr_changed_nobufs;
69309 -atomic_t fscache_n_attr_changed_nomem;
69310 -atomic_t fscache_n_attr_changed_calls;
69311 +atomic_unchecked_t fscache_n_attr_changed;
69312 +atomic_unchecked_t fscache_n_attr_changed_ok;
69313 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
69314 +atomic_unchecked_t fscache_n_attr_changed_nomem;
69315 +atomic_unchecked_t fscache_n_attr_changed_calls;
69316
69317 -atomic_t fscache_n_allocs;
69318 -atomic_t fscache_n_allocs_ok;
69319 -atomic_t fscache_n_allocs_wait;
69320 -atomic_t fscache_n_allocs_nobufs;
69321 -atomic_t fscache_n_allocs_intr;
69322 -atomic_t fscache_n_allocs_object_dead;
69323 -atomic_t fscache_n_alloc_ops;
69324 -atomic_t fscache_n_alloc_op_waits;
69325 +atomic_unchecked_t fscache_n_allocs;
69326 +atomic_unchecked_t fscache_n_allocs_ok;
69327 +atomic_unchecked_t fscache_n_allocs_wait;
69328 +atomic_unchecked_t fscache_n_allocs_nobufs;
69329 +atomic_unchecked_t fscache_n_allocs_intr;
69330 +atomic_unchecked_t fscache_n_allocs_object_dead;
69331 +atomic_unchecked_t fscache_n_alloc_ops;
69332 +atomic_unchecked_t fscache_n_alloc_op_waits;
69333
69334 -atomic_t fscache_n_retrievals;
69335 -atomic_t fscache_n_retrievals_ok;
69336 -atomic_t fscache_n_retrievals_wait;
69337 -atomic_t fscache_n_retrievals_nodata;
69338 -atomic_t fscache_n_retrievals_nobufs;
69339 -atomic_t fscache_n_retrievals_intr;
69340 -atomic_t fscache_n_retrievals_nomem;
69341 -atomic_t fscache_n_retrievals_object_dead;
69342 -atomic_t fscache_n_retrieval_ops;
69343 -atomic_t fscache_n_retrieval_op_waits;
69344 +atomic_unchecked_t fscache_n_retrievals;
69345 +atomic_unchecked_t fscache_n_retrievals_ok;
69346 +atomic_unchecked_t fscache_n_retrievals_wait;
69347 +atomic_unchecked_t fscache_n_retrievals_nodata;
69348 +atomic_unchecked_t fscache_n_retrievals_nobufs;
69349 +atomic_unchecked_t fscache_n_retrievals_intr;
69350 +atomic_unchecked_t fscache_n_retrievals_nomem;
69351 +atomic_unchecked_t fscache_n_retrievals_object_dead;
69352 +atomic_unchecked_t fscache_n_retrieval_ops;
69353 +atomic_unchecked_t fscache_n_retrieval_op_waits;
69354
69355 -atomic_t fscache_n_stores;
69356 -atomic_t fscache_n_stores_ok;
69357 -atomic_t fscache_n_stores_again;
69358 -atomic_t fscache_n_stores_nobufs;
69359 -atomic_t fscache_n_stores_oom;
69360 -atomic_t fscache_n_store_ops;
69361 -atomic_t fscache_n_store_calls;
69362 -atomic_t fscache_n_store_pages;
69363 -atomic_t fscache_n_store_radix_deletes;
69364 -atomic_t fscache_n_store_pages_over_limit;
69365 +atomic_unchecked_t fscache_n_stores;
69366 +atomic_unchecked_t fscache_n_stores_ok;
69367 +atomic_unchecked_t fscache_n_stores_again;
69368 +atomic_unchecked_t fscache_n_stores_nobufs;
69369 +atomic_unchecked_t fscache_n_stores_oom;
69370 +atomic_unchecked_t fscache_n_store_ops;
69371 +atomic_unchecked_t fscache_n_store_calls;
69372 +atomic_unchecked_t fscache_n_store_pages;
69373 +atomic_unchecked_t fscache_n_store_radix_deletes;
69374 +atomic_unchecked_t fscache_n_store_pages_over_limit;
69375
69376 -atomic_t fscache_n_store_vmscan_not_storing;
69377 -atomic_t fscache_n_store_vmscan_gone;
69378 -atomic_t fscache_n_store_vmscan_busy;
69379 -atomic_t fscache_n_store_vmscan_cancelled;
69380 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
69381 +atomic_unchecked_t fscache_n_store_vmscan_gone;
69382 +atomic_unchecked_t fscache_n_store_vmscan_busy;
69383 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
69384
69385 -atomic_t fscache_n_marks;
69386 -atomic_t fscache_n_uncaches;
69387 +atomic_unchecked_t fscache_n_marks;
69388 +atomic_unchecked_t fscache_n_uncaches;
69389
69390 -atomic_t fscache_n_acquires;
69391 -atomic_t fscache_n_acquires_null;
69392 -atomic_t fscache_n_acquires_no_cache;
69393 -atomic_t fscache_n_acquires_ok;
69394 -atomic_t fscache_n_acquires_nobufs;
69395 -atomic_t fscache_n_acquires_oom;
69396 +atomic_unchecked_t fscache_n_acquires;
69397 +atomic_unchecked_t fscache_n_acquires_null;
69398 +atomic_unchecked_t fscache_n_acquires_no_cache;
69399 +atomic_unchecked_t fscache_n_acquires_ok;
69400 +atomic_unchecked_t fscache_n_acquires_nobufs;
69401 +atomic_unchecked_t fscache_n_acquires_oom;
69402
69403 -atomic_t fscache_n_updates;
69404 -atomic_t fscache_n_updates_null;
69405 -atomic_t fscache_n_updates_run;
69406 +atomic_unchecked_t fscache_n_updates;
69407 +atomic_unchecked_t fscache_n_updates_null;
69408 +atomic_unchecked_t fscache_n_updates_run;
69409
69410 -atomic_t fscache_n_relinquishes;
69411 -atomic_t fscache_n_relinquishes_null;
69412 -atomic_t fscache_n_relinquishes_waitcrt;
69413 -atomic_t fscache_n_relinquishes_retire;
69414 +atomic_unchecked_t fscache_n_relinquishes;
69415 +atomic_unchecked_t fscache_n_relinquishes_null;
69416 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
69417 +atomic_unchecked_t fscache_n_relinquishes_retire;
69418
69419 -atomic_t fscache_n_cookie_index;
69420 -atomic_t fscache_n_cookie_data;
69421 -atomic_t fscache_n_cookie_special;
69422 +atomic_unchecked_t fscache_n_cookie_index;
69423 +atomic_unchecked_t fscache_n_cookie_data;
69424 +atomic_unchecked_t fscache_n_cookie_special;
69425
69426 -atomic_t fscache_n_object_alloc;
69427 -atomic_t fscache_n_object_no_alloc;
69428 -atomic_t fscache_n_object_lookups;
69429 -atomic_t fscache_n_object_lookups_negative;
69430 -atomic_t fscache_n_object_lookups_positive;
69431 -atomic_t fscache_n_object_lookups_timed_out;
69432 -atomic_t fscache_n_object_created;
69433 -atomic_t fscache_n_object_avail;
69434 -atomic_t fscache_n_object_dead;
69435 +atomic_unchecked_t fscache_n_object_alloc;
69436 +atomic_unchecked_t fscache_n_object_no_alloc;
69437 +atomic_unchecked_t fscache_n_object_lookups;
69438 +atomic_unchecked_t fscache_n_object_lookups_negative;
69439 +atomic_unchecked_t fscache_n_object_lookups_positive;
69440 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
69441 +atomic_unchecked_t fscache_n_object_created;
69442 +atomic_unchecked_t fscache_n_object_avail;
69443 +atomic_unchecked_t fscache_n_object_dead;
69444
69445 -atomic_t fscache_n_checkaux_none;
69446 -atomic_t fscache_n_checkaux_okay;
69447 -atomic_t fscache_n_checkaux_update;
69448 -atomic_t fscache_n_checkaux_obsolete;
69449 +atomic_unchecked_t fscache_n_checkaux_none;
69450 +atomic_unchecked_t fscache_n_checkaux_okay;
69451 +atomic_unchecked_t fscache_n_checkaux_update;
69452 +atomic_unchecked_t fscache_n_checkaux_obsolete;
69453
69454 atomic_t fscache_n_cop_alloc_object;
69455 atomic_t fscache_n_cop_lookup_object;
69456 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
69457 seq_puts(m, "FS-Cache statistics\n");
69458
69459 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
69460 - atomic_read(&fscache_n_cookie_index),
69461 - atomic_read(&fscache_n_cookie_data),
69462 - atomic_read(&fscache_n_cookie_special));
69463 + atomic_read_unchecked(&fscache_n_cookie_index),
69464 + atomic_read_unchecked(&fscache_n_cookie_data),
69465 + atomic_read_unchecked(&fscache_n_cookie_special));
69466
69467 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
69468 - atomic_read(&fscache_n_object_alloc),
69469 - atomic_read(&fscache_n_object_no_alloc),
69470 - atomic_read(&fscache_n_object_avail),
69471 - atomic_read(&fscache_n_object_dead));
69472 + atomic_read_unchecked(&fscache_n_object_alloc),
69473 + atomic_read_unchecked(&fscache_n_object_no_alloc),
69474 + atomic_read_unchecked(&fscache_n_object_avail),
69475 + atomic_read_unchecked(&fscache_n_object_dead));
69476 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
69477 - atomic_read(&fscache_n_checkaux_none),
69478 - atomic_read(&fscache_n_checkaux_okay),
69479 - atomic_read(&fscache_n_checkaux_update),
69480 - atomic_read(&fscache_n_checkaux_obsolete));
69481 + atomic_read_unchecked(&fscache_n_checkaux_none),
69482 + atomic_read_unchecked(&fscache_n_checkaux_okay),
69483 + atomic_read_unchecked(&fscache_n_checkaux_update),
69484 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
69485
69486 seq_printf(m, "Pages : mrk=%u unc=%u\n",
69487 - atomic_read(&fscache_n_marks),
69488 - atomic_read(&fscache_n_uncaches));
69489 + atomic_read_unchecked(&fscache_n_marks),
69490 + atomic_read_unchecked(&fscache_n_uncaches));
69491
69492 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
69493 " oom=%u\n",
69494 - atomic_read(&fscache_n_acquires),
69495 - atomic_read(&fscache_n_acquires_null),
69496 - atomic_read(&fscache_n_acquires_no_cache),
69497 - atomic_read(&fscache_n_acquires_ok),
69498 - atomic_read(&fscache_n_acquires_nobufs),
69499 - atomic_read(&fscache_n_acquires_oom));
69500 + atomic_read_unchecked(&fscache_n_acquires),
69501 + atomic_read_unchecked(&fscache_n_acquires_null),
69502 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
69503 + atomic_read_unchecked(&fscache_n_acquires_ok),
69504 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
69505 + atomic_read_unchecked(&fscache_n_acquires_oom));
69506
69507 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
69508 - atomic_read(&fscache_n_object_lookups),
69509 - atomic_read(&fscache_n_object_lookups_negative),
69510 - atomic_read(&fscache_n_object_lookups_positive),
69511 - atomic_read(&fscache_n_object_lookups_timed_out),
69512 - atomic_read(&fscache_n_object_created));
69513 + atomic_read_unchecked(&fscache_n_object_lookups),
69514 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
69515 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
69516 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
69517 + atomic_read_unchecked(&fscache_n_object_created));
69518
69519 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
69520 - atomic_read(&fscache_n_updates),
69521 - atomic_read(&fscache_n_updates_null),
69522 - atomic_read(&fscache_n_updates_run));
69523 + atomic_read_unchecked(&fscache_n_updates),
69524 + atomic_read_unchecked(&fscache_n_updates_null),
69525 + atomic_read_unchecked(&fscache_n_updates_run));
69526
69527 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
69528 - atomic_read(&fscache_n_relinquishes),
69529 - atomic_read(&fscache_n_relinquishes_null),
69530 - atomic_read(&fscache_n_relinquishes_waitcrt),
69531 - atomic_read(&fscache_n_relinquishes_retire));
69532 + atomic_read_unchecked(&fscache_n_relinquishes),
69533 + atomic_read_unchecked(&fscache_n_relinquishes_null),
69534 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
69535 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
69536
69537 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
69538 - atomic_read(&fscache_n_attr_changed),
69539 - atomic_read(&fscache_n_attr_changed_ok),
69540 - atomic_read(&fscache_n_attr_changed_nobufs),
69541 - atomic_read(&fscache_n_attr_changed_nomem),
69542 - atomic_read(&fscache_n_attr_changed_calls));
69543 + atomic_read_unchecked(&fscache_n_attr_changed),
69544 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
69545 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
69546 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
69547 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
69548
69549 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
69550 - atomic_read(&fscache_n_allocs),
69551 - atomic_read(&fscache_n_allocs_ok),
69552 - atomic_read(&fscache_n_allocs_wait),
69553 - atomic_read(&fscache_n_allocs_nobufs),
69554 - atomic_read(&fscache_n_allocs_intr));
69555 + atomic_read_unchecked(&fscache_n_allocs),
69556 + atomic_read_unchecked(&fscache_n_allocs_ok),
69557 + atomic_read_unchecked(&fscache_n_allocs_wait),
69558 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
69559 + atomic_read_unchecked(&fscache_n_allocs_intr));
69560 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
69561 - atomic_read(&fscache_n_alloc_ops),
69562 - atomic_read(&fscache_n_alloc_op_waits),
69563 - atomic_read(&fscache_n_allocs_object_dead));
69564 + atomic_read_unchecked(&fscache_n_alloc_ops),
69565 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
69566 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
69567
69568 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
69569 " int=%u oom=%u\n",
69570 - atomic_read(&fscache_n_retrievals),
69571 - atomic_read(&fscache_n_retrievals_ok),
69572 - atomic_read(&fscache_n_retrievals_wait),
69573 - atomic_read(&fscache_n_retrievals_nodata),
69574 - atomic_read(&fscache_n_retrievals_nobufs),
69575 - atomic_read(&fscache_n_retrievals_intr),
69576 - atomic_read(&fscache_n_retrievals_nomem));
69577 + atomic_read_unchecked(&fscache_n_retrievals),
69578 + atomic_read_unchecked(&fscache_n_retrievals_ok),
69579 + atomic_read_unchecked(&fscache_n_retrievals_wait),
69580 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
69581 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
69582 + atomic_read_unchecked(&fscache_n_retrievals_intr),
69583 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
69584 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
69585 - atomic_read(&fscache_n_retrieval_ops),
69586 - atomic_read(&fscache_n_retrieval_op_waits),
69587 - atomic_read(&fscache_n_retrievals_object_dead));
69588 + atomic_read_unchecked(&fscache_n_retrieval_ops),
69589 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
69590 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
69591
69592 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
69593 - atomic_read(&fscache_n_stores),
69594 - atomic_read(&fscache_n_stores_ok),
69595 - atomic_read(&fscache_n_stores_again),
69596 - atomic_read(&fscache_n_stores_nobufs),
69597 - atomic_read(&fscache_n_stores_oom));
69598 + atomic_read_unchecked(&fscache_n_stores),
69599 + atomic_read_unchecked(&fscache_n_stores_ok),
69600 + atomic_read_unchecked(&fscache_n_stores_again),
69601 + atomic_read_unchecked(&fscache_n_stores_nobufs),
69602 + atomic_read_unchecked(&fscache_n_stores_oom));
69603 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
69604 - atomic_read(&fscache_n_store_ops),
69605 - atomic_read(&fscache_n_store_calls),
69606 - atomic_read(&fscache_n_store_pages),
69607 - atomic_read(&fscache_n_store_radix_deletes),
69608 - atomic_read(&fscache_n_store_pages_over_limit));
69609 + atomic_read_unchecked(&fscache_n_store_ops),
69610 + atomic_read_unchecked(&fscache_n_store_calls),
69611 + atomic_read_unchecked(&fscache_n_store_pages),
69612 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
69613 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
69614
69615 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
69616 - atomic_read(&fscache_n_store_vmscan_not_storing),
69617 - atomic_read(&fscache_n_store_vmscan_gone),
69618 - atomic_read(&fscache_n_store_vmscan_busy),
69619 - atomic_read(&fscache_n_store_vmscan_cancelled));
69620 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
69621 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
69622 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
69623 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
69624
69625 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
69626 - atomic_read(&fscache_n_op_pend),
69627 - atomic_read(&fscache_n_op_run),
69628 - atomic_read(&fscache_n_op_enqueue),
69629 - atomic_read(&fscache_n_op_cancelled),
69630 - atomic_read(&fscache_n_op_rejected));
69631 + atomic_read_unchecked(&fscache_n_op_pend),
69632 + atomic_read_unchecked(&fscache_n_op_run),
69633 + atomic_read_unchecked(&fscache_n_op_enqueue),
69634 + atomic_read_unchecked(&fscache_n_op_cancelled),
69635 + atomic_read_unchecked(&fscache_n_op_rejected));
69636 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
69637 - atomic_read(&fscache_n_op_deferred_release),
69638 - atomic_read(&fscache_n_op_release),
69639 - atomic_read(&fscache_n_op_gc));
69640 + atomic_read_unchecked(&fscache_n_op_deferred_release),
69641 + atomic_read_unchecked(&fscache_n_op_release),
69642 + atomic_read_unchecked(&fscache_n_op_gc));
69643
69644 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
69645 atomic_read(&fscache_n_cop_alloc_object),
69646 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
69647 index de792dc..448b532 100644
69648 --- a/fs/fuse/cuse.c
69649 +++ b/fs/fuse/cuse.c
69650 @@ -576,10 +576,12 @@ static int __init cuse_init(void)
69651 INIT_LIST_HEAD(&cuse_conntbl[i]);
69652
69653 /* inherit and extend fuse_dev_operations */
69654 - cuse_channel_fops = fuse_dev_operations;
69655 - cuse_channel_fops.owner = THIS_MODULE;
69656 - cuse_channel_fops.open = cuse_channel_open;
69657 - cuse_channel_fops.release = cuse_channel_release;
69658 + pax_open_kernel();
69659 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
69660 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
69661 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
69662 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
69663 + pax_close_kernel();
69664
69665 cuse_class = class_create(THIS_MODULE, "cuse");
69666 if (IS_ERR(cuse_class))
69667 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
69668 index 1facb39..7f48557 100644
69669 --- a/fs/fuse/dev.c
69670 +++ b/fs/fuse/dev.c
69671 @@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
69672 {
69673 struct fuse_notify_inval_entry_out outarg;
69674 int err = -EINVAL;
69675 - char buf[FUSE_NAME_MAX+1];
69676 + char *buf = NULL;
69677 struct qstr name;
69678
69679 if (size < sizeof(outarg))
69680 @@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
69681 if (outarg.namelen > FUSE_NAME_MAX)
69682 goto err;
69683
69684 + err = -ENOMEM;
69685 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
69686 + if (!buf)
69687 + goto err;
69688 +
69689 err = -EINVAL;
69690 if (size != sizeof(outarg) + outarg.namelen + 1)
69691 goto err;
69692 @@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
69693
69694 down_read(&fc->killsb);
69695 err = -ENOENT;
69696 - if (!fc->sb)
69697 - goto err_unlock;
69698 -
69699 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
69700 -
69701 -err_unlock:
69702 + if (fc->sb)
69703 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
69704 up_read(&fc->killsb);
69705 + kfree(buf);
69706 return err;
69707
69708 err:
69709 fuse_copy_finish(cs);
69710 + kfree(buf);
69711 return err;
69712 }
69713
69714 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
69715 index 4787ae6..73efff7 100644
69716 --- a/fs/fuse/dir.c
69717 +++ b/fs/fuse/dir.c
69718 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
69719 return link;
69720 }
69721
69722 -static void free_link(char *link)
69723 +static void free_link(const char *link)
69724 {
69725 if (!IS_ERR(link))
69726 free_page((unsigned long) link);
69727 diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
69728 index 247436c..e650ccb 100644
69729 --- a/fs/gfs2/ops_inode.c
69730 +++ b/fs/gfs2/ops_inode.c
69731 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
69732 unsigned int x;
69733 int error;
69734
69735 + pax_track_stack();
69736 +
69737 if (ndentry->d_inode) {
69738 nip = GFS2_I(ndentry->d_inode);
69739 if (ip == nip)
69740 diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
69741 index 4463297..4fed53b 100644
69742 --- a/fs/gfs2/sys.c
69743 +++ b/fs/gfs2/sys.c
69744 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
69745 return a->store ? a->store(sdp, buf, len) : len;
69746 }
69747
69748 -static struct sysfs_ops gfs2_attr_ops = {
69749 +static const struct sysfs_ops gfs2_attr_ops = {
69750 .show = gfs2_attr_show,
69751 .store = gfs2_attr_store,
69752 };
69753 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
69754 return 0;
69755 }
69756
69757 -static struct kset_uevent_ops gfs2_uevent_ops = {
69758 +static const struct kset_uevent_ops gfs2_uevent_ops = {
69759 .uevent = gfs2_uevent,
69760 };
69761
69762 diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
69763 index f6874ac..7cd98a8 100644
69764 --- a/fs/hfsplus/catalog.c
69765 +++ b/fs/hfsplus/catalog.c
69766 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
69767 int err;
69768 u16 type;
69769
69770 + pax_track_stack();
69771 +
69772 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
69773 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
69774 if (err)
69775 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
69776 int entry_size;
69777 int err;
69778
69779 + pax_track_stack();
69780 +
69781 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
69782 sb = dir->i_sb;
69783 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
69784 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
69785 int entry_size, type;
69786 int err = 0;
69787
69788 + pax_track_stack();
69789 +
69790 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
69791 dst_dir->i_ino, dst_name->name);
69792 sb = src_dir->i_sb;
69793 diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
69794 index 5f40236..dac3421 100644
69795 --- a/fs/hfsplus/dir.c
69796 +++ b/fs/hfsplus/dir.c
69797 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
69798 struct hfsplus_readdir_data *rd;
69799 u16 type;
69800
69801 + pax_track_stack();
69802 +
69803 if (filp->f_pos >= inode->i_size)
69804 return 0;
69805
69806 diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
69807 index 1bcf597..905a251 100644
69808 --- a/fs/hfsplus/inode.c
69809 +++ b/fs/hfsplus/inode.c
69810 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
69811 int res = 0;
69812 u16 type;
69813
69814 + pax_track_stack();
69815 +
69816 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
69817
69818 HFSPLUS_I(inode).dev = 0;
69819 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
69820 struct hfs_find_data fd;
69821 hfsplus_cat_entry entry;
69822
69823 + pax_track_stack();
69824 +
69825 if (HFSPLUS_IS_RSRC(inode))
69826 main_inode = HFSPLUS_I(inode).rsrc_inode;
69827
69828 diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
69829 index f457d2c..7ef4ad5 100644
69830 --- a/fs/hfsplus/ioctl.c
69831 +++ b/fs/hfsplus/ioctl.c
69832 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
69833 struct hfsplus_cat_file *file;
69834 int res;
69835
69836 + pax_track_stack();
69837 +
69838 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
69839 return -EOPNOTSUPP;
69840
69841 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
69842 struct hfsplus_cat_file *file;
69843 ssize_t res = 0;
69844
69845 + pax_track_stack();
69846 +
69847 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
69848 return -EOPNOTSUPP;
69849
69850 diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
69851 index 43022f3..7298079 100644
69852 --- a/fs/hfsplus/super.c
69853 +++ b/fs/hfsplus/super.c
69854 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
69855 struct nls_table *nls = NULL;
69856 int err = -EINVAL;
69857
69858 + pax_track_stack();
69859 +
69860 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
69861 if (!sbi)
69862 return -ENOMEM;
69863 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
69864 index 87a1258..5694d91 100644
69865 --- a/fs/hugetlbfs/inode.c
69866 +++ b/fs/hugetlbfs/inode.c
69867 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
69868 .kill_sb = kill_litter_super,
69869 };
69870
69871 -static struct vfsmount *hugetlbfs_vfsmount;
69872 +struct vfsmount *hugetlbfs_vfsmount;
69873
69874 static int can_do_hugetlb_shm(void)
69875 {
69876 diff --git a/fs/ioctl.c b/fs/ioctl.c
69877 index 6c75110..19d2c3c 100644
69878 --- a/fs/ioctl.c
69879 +++ b/fs/ioctl.c
69880 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
69881 u64 phys, u64 len, u32 flags)
69882 {
69883 struct fiemap_extent extent;
69884 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
69885 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
69886
69887 /* only count the extents */
69888 if (fieinfo->fi_extents_max == 0) {
69889 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
69890
69891 fieinfo.fi_flags = fiemap.fm_flags;
69892 fieinfo.fi_extents_max = fiemap.fm_extent_count;
69893 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
69894 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
69895
69896 if (fiemap.fm_extent_count != 0 &&
69897 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
69898 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
69899 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
69900 fiemap.fm_flags = fieinfo.fi_flags;
69901 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
69902 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
69903 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
69904 error = -EFAULT;
69905
69906 return error;
69907 diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
69908 index b0435dd..81ee0be 100644
69909 --- a/fs/jbd/checkpoint.c
69910 +++ b/fs/jbd/checkpoint.c
69911 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
69912 tid_t this_tid;
69913 int result;
69914
69915 + pax_track_stack();
69916 +
69917 jbd_debug(1, "Start checkpoint\n");
69918
69919 /*
69920 diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
69921 index 546d153..736896c 100644
69922 --- a/fs/jffs2/compr_rtime.c
69923 +++ b/fs/jffs2/compr_rtime.c
69924 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
69925 int outpos = 0;
69926 int pos=0;
69927
69928 + pax_track_stack();
69929 +
69930 memset(positions,0,sizeof(positions));
69931
69932 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
69933 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
69934 int outpos = 0;
69935 int pos=0;
69936
69937 + pax_track_stack();
69938 +
69939 memset(positions,0,sizeof(positions));
69940
69941 while (outpos<destlen) {
69942 diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
69943 index 170d289..3254b98 100644
69944 --- a/fs/jffs2/compr_rubin.c
69945 +++ b/fs/jffs2/compr_rubin.c
69946 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
69947 int ret;
69948 uint32_t mysrclen, mydstlen;
69949
69950 + pax_track_stack();
69951 +
69952 mysrclen = *sourcelen;
69953 mydstlen = *dstlen - 8;
69954
69955 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
69956 index b47679b..00d65d3 100644
69957 --- a/fs/jffs2/erase.c
69958 +++ b/fs/jffs2/erase.c
69959 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
69960 struct jffs2_unknown_node marker = {
69961 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
69962 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
69963 - .totlen = cpu_to_je32(c->cleanmarker_size)
69964 + .totlen = cpu_to_je32(c->cleanmarker_size),
69965 + .hdr_crc = cpu_to_je32(0)
69966 };
69967
69968 jffs2_prealloc_raw_node_refs(c, jeb, 1);
69969 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
69970 index 5ef7bac..4fd1e3c 100644
69971 --- a/fs/jffs2/wbuf.c
69972 +++ b/fs/jffs2/wbuf.c
69973 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
69974 {
69975 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
69976 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
69977 - .totlen = constant_cpu_to_je32(8)
69978 + .totlen = constant_cpu_to_je32(8),
69979 + .hdr_crc = constant_cpu_to_je32(0)
69980 };
69981
69982 /*
69983 diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
69984 index 082e844..52012a1 100644
69985 --- a/fs/jffs2/xattr.c
69986 +++ b/fs/jffs2/xattr.c
69987 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
69988
69989 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
69990
69991 + pax_track_stack();
69992 +
69993 /* Phase.1 : Merge same xref */
69994 for (i=0; i < XREF_TMPHASH_SIZE; i++)
69995 xref_tmphash[i] = NULL;
69996 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
69997 index 2234c73..f6e6e6b 100644
69998 --- a/fs/jfs/super.c
69999 +++ b/fs/jfs/super.c
70000 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
70001
70002 jfs_inode_cachep =
70003 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
70004 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
70005 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
70006 init_once);
70007 if (jfs_inode_cachep == NULL)
70008 return -ENOMEM;
70009 diff --git a/fs/libfs.c b/fs/libfs.c
70010 index ba36e93..3153fce 100644
70011 --- a/fs/libfs.c
70012 +++ b/fs/libfs.c
70013 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
70014
70015 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
70016 struct dentry *next;
70017 + char d_name[sizeof(next->d_iname)];
70018 + const unsigned char *name;
70019 +
70020 next = list_entry(p, struct dentry, d_u.d_child);
70021 if (d_unhashed(next) || !next->d_inode)
70022 continue;
70023
70024 spin_unlock(&dcache_lock);
70025 - if (filldir(dirent, next->d_name.name,
70026 + name = next->d_name.name;
70027 + if (name == next->d_iname) {
70028 + memcpy(d_name, name, next->d_name.len);
70029 + name = d_name;
70030 + }
70031 + if (filldir(dirent, name,
70032 next->d_name.len, filp->f_pos,
70033 next->d_inode->i_ino,
70034 dt_type(next->d_inode)) < 0)
70035 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
70036 index c325a83..d15b07b 100644
70037 --- a/fs/lockd/clntproc.c
70038 +++ b/fs/lockd/clntproc.c
70039 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
70040 /*
70041 * Cookie counter for NLM requests
70042 */
70043 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
70044 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
70045
70046 void nlmclnt_next_cookie(struct nlm_cookie *c)
70047 {
70048 - u32 cookie = atomic_inc_return(&nlm_cookie);
70049 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
70050
70051 memcpy(c->data, &cookie, 4);
70052 c->len=4;
70053 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
70054 struct nlm_rqst reqst, *req;
70055 int status;
70056
70057 + pax_track_stack();
70058 +
70059 req = &reqst;
70060 memset(req, 0, sizeof(*req));
70061 locks_init_lock(&req->a_args.lock.fl);
70062 diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
70063 index 1a54ae1..6a16c27 100644
70064 --- a/fs/lockd/svc.c
70065 +++ b/fs/lockd/svc.c
70066 @@ -43,7 +43,7 @@
70067
70068 static struct svc_program nlmsvc_program;
70069
70070 -struct nlmsvc_binding * nlmsvc_ops;
70071 +const struct nlmsvc_binding * nlmsvc_ops;
70072 EXPORT_SYMBOL_GPL(nlmsvc_ops);
70073
70074 static DEFINE_MUTEX(nlmsvc_mutex);
70075 diff --git a/fs/locks.c b/fs/locks.c
70076 index a8794f2..4041e55 100644
70077 --- a/fs/locks.c
70078 +++ b/fs/locks.c
70079 @@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
70080
70081 static struct kmem_cache *filelock_cache __read_mostly;
70082
70083 +static void locks_init_lock_always(struct file_lock *fl)
70084 +{
70085 + fl->fl_next = NULL;
70086 + fl->fl_fasync = NULL;
70087 + fl->fl_owner = NULL;
70088 + fl->fl_pid = 0;
70089 + fl->fl_nspid = NULL;
70090 + fl->fl_file = NULL;
70091 + fl->fl_flags = 0;
70092 + fl->fl_type = 0;
70093 + fl->fl_start = fl->fl_end = 0;
70094 +}
70095 +
70096 /* Allocate an empty lock structure. */
70097 static struct file_lock *locks_alloc_lock(void)
70098 {
70099 - return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
70100 + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
70101 +
70102 + if (fl)
70103 + locks_init_lock_always(fl);
70104 +
70105 + return fl;
70106 }
70107
70108 void locks_release_private(struct file_lock *fl)
70109 @@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
70110 INIT_LIST_HEAD(&fl->fl_link);
70111 INIT_LIST_HEAD(&fl->fl_block);
70112 init_waitqueue_head(&fl->fl_wait);
70113 - fl->fl_next = NULL;
70114 - fl->fl_fasync = NULL;
70115 - fl->fl_owner = NULL;
70116 - fl->fl_pid = 0;
70117 - fl->fl_nspid = NULL;
70118 - fl->fl_file = NULL;
70119 - fl->fl_flags = 0;
70120 - fl->fl_type = 0;
70121 - fl->fl_start = fl->fl_end = 0;
70122 fl->fl_ops = NULL;
70123 fl->fl_lmops = NULL;
70124 + locks_init_lock_always(fl);
70125 }
70126
70127 EXPORT_SYMBOL(locks_init_lock);
70128 @@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
70129 return;
70130
70131 if (filp->f_op && filp->f_op->flock) {
70132 - struct file_lock fl = {
70133 + struct file_lock flock = {
70134 .fl_pid = current->tgid,
70135 .fl_file = filp,
70136 .fl_flags = FL_FLOCK,
70137 .fl_type = F_UNLCK,
70138 .fl_end = OFFSET_MAX,
70139 };
70140 - filp->f_op->flock(filp, F_SETLKW, &fl);
70141 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
70142 - fl.fl_ops->fl_release_private(&fl);
70143 + filp->f_op->flock(filp, F_SETLKW, &flock);
70144 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
70145 + flock.fl_ops->fl_release_private(&flock);
70146 }
70147
70148 lock_kernel();
70149 diff --git a/fs/mbcache.c b/fs/mbcache.c
70150 index ec88ff3..b843a82 100644
70151 --- a/fs/mbcache.c
70152 +++ b/fs/mbcache.c
70153 @@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
70154 if (!cache)
70155 goto fail;
70156 cache->c_name = name;
70157 - cache->c_op.free = NULL;
70158 + *(void **)&cache->c_op.free = NULL;
70159 if (cache_op)
70160 - cache->c_op.free = cache_op->free;
70161 + *(void **)&cache->c_op.free = cache_op->free;
70162 atomic_set(&cache->c_entry_count, 0);
70163 cache->c_bucket_bits = bucket_bits;
70164 #ifdef MB_CACHE_INDEXES_COUNT
70165 diff --git a/fs/namei.c b/fs/namei.c
70166 index b0afbd4..8d065a1 100644
70167 --- a/fs/namei.c
70168 +++ b/fs/namei.c
70169 @@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
70170 return ret;
70171
70172 /*
70173 + * Searching includes executable on directories, else just read.
70174 + */
70175 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
70176 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
70177 + if (capable(CAP_DAC_READ_SEARCH))
70178 + return 0;
70179 +
70180 + /*
70181 * Read/write DACs are always overridable.
70182 * Executable DACs are overridable if at least one exec bit is set.
70183 */
70184 @@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
70185 if (capable(CAP_DAC_OVERRIDE))
70186 return 0;
70187
70188 - /*
70189 - * Searching includes executable on directories, else just read.
70190 - */
70191 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
70192 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
70193 - if (capable(CAP_DAC_READ_SEARCH))
70194 - return 0;
70195 -
70196 return -EACCES;
70197 }
70198
70199 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
70200 if (!ret)
70201 goto ok;
70202
70203 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
70204 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
70205 + capable(CAP_DAC_OVERRIDE))
70206 goto ok;
70207
70208 return ret;
70209 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
70210 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
70211 error = PTR_ERR(cookie);
70212 if (!IS_ERR(cookie)) {
70213 - char *s = nd_get_link(nd);
70214 + const char *s = nd_get_link(nd);
70215 error = 0;
70216 if (s)
70217 error = __vfs_follow_link(nd, s);
70218 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
70219 err = security_inode_follow_link(path->dentry, nd);
70220 if (err)
70221 goto loop;
70222 +
70223 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
70224 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
70225 + err = -EACCES;
70226 + goto loop;
70227 + }
70228 +
70229 current->link_count++;
70230 current->total_link_count++;
70231 nd->depth++;
70232 @@ -1016,11 +1024,19 @@ return_reval:
70233 break;
70234 }
70235 return_base:
70236 + if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
70237 + !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
70238 + path_put(&nd->path);
70239 + return -ENOENT;
70240 + }
70241 return 0;
70242 out_dput:
70243 path_put_conditional(&next, nd);
70244 break;
70245 }
70246 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
70247 + err = -ENOENT;
70248 +
70249 path_put(&nd->path);
70250 return_err:
70251 return err;
70252 @@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
70253 int retval = path_init(dfd, name, flags, nd);
70254 if (!retval)
70255 retval = path_walk(name, nd);
70256 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
70257 - nd->path.dentry->d_inode))
70258 - audit_inode(name, nd->path.dentry);
70259 +
70260 + if (likely(!retval)) {
70261 + if (nd->path.dentry && nd->path.dentry->d_inode) {
70262 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
70263 + retval = -ENOENT;
70264 + if (!audit_dummy_context())
70265 + audit_inode(name, nd->path.dentry);
70266 + }
70267 + }
70268 if (nd->root.mnt) {
70269 path_put(&nd->root);
70270 nd->root.mnt = NULL;
70271 }
70272 +
70273 return retval;
70274 }
70275
70276 @@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
70277 if (error)
70278 goto err_out;
70279
70280 +
70281 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
70282 + error = -EPERM;
70283 + goto err_out;
70284 + }
70285 + if (gr_handle_rawio(inode)) {
70286 + error = -EPERM;
70287 + goto err_out;
70288 + }
70289 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
70290 + error = -EACCES;
70291 + goto err_out;
70292 + }
70293 +
70294 if (flag & O_TRUNC) {
70295 error = get_write_access(inode);
70296 if (error)
70297 @@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
70298 {
70299 int error;
70300 struct dentry *dir = nd->path.dentry;
70301 + int acc_mode = ACC_MODE(flag);
70302 +
70303 + if (flag & O_TRUNC)
70304 + acc_mode |= MAY_WRITE;
70305 + if (flag & O_APPEND)
70306 + acc_mode |= MAY_APPEND;
70307 +
70308 + if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
70309 + error = -EACCES;
70310 + goto out_unlock;
70311 + }
70312
70313 if (!IS_POSIXACL(dir->d_inode))
70314 mode &= ~current_umask();
70315 @@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
70316 if (error)
70317 goto out_unlock;
70318 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
70319 + if (!error)
70320 + gr_handle_create(path->dentry, nd->path.mnt);
70321 out_unlock:
70322 mutex_unlock(&dir->d_inode->i_mutex);
70323 dput(nd->path.dentry);
70324 @@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
70325 &nd, flag);
70326 if (error)
70327 return ERR_PTR(error);
70328 +
70329 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
70330 + error = -EPERM;
70331 + goto exit;
70332 + }
70333 +
70334 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
70335 + error = -EPERM;
70336 + goto exit;
70337 + }
70338 +
70339 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
70340 + error = -EACCES;
70341 + goto exit;
70342 + }
70343 +
70344 goto ok;
70345 }
70346
70347 @@ -1795,6 +1861,19 @@ do_last:
70348 /*
70349 * It already exists.
70350 */
70351 +
70352 + if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
70353 + error = -ENOENT;
70354 + goto exit_mutex_unlock;
70355 + }
70356 +
70357 + /* only check if O_CREAT is specified, all other checks need
70358 + to go into may_open */
70359 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
70360 + error = -EACCES;
70361 + goto exit_mutex_unlock;
70362 + }
70363 +
70364 mutex_unlock(&dir->d_inode->i_mutex);
70365 audit_inode(pathname, path.dentry);
70366
70367 @@ -1887,6 +1966,13 @@ do_link:
70368 error = security_inode_follow_link(path.dentry, &nd);
70369 if (error)
70370 goto exit_dput;
70371 +
70372 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
70373 + path.dentry, nd.path.mnt)) {
70374 + error = -EACCES;
70375 + goto exit_dput;
70376 + }
70377 +
70378 error = __do_follow_link(&path, &nd);
70379 if (error) {
70380 /* Does someone understand code flow here? Or it is only
70381 @@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
70382 }
70383 return dentry;
70384 eexist:
70385 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
70386 + dput(dentry);
70387 + return ERR_PTR(-ENOENT);
70388 + }
70389 dput(dentry);
70390 dentry = ERR_PTR(-EEXIST);
70391 fail:
70392 @@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
70393 error = may_mknod(mode);
70394 if (error)
70395 goto out_dput;
70396 +
70397 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
70398 + error = -EPERM;
70399 + goto out_dput;
70400 + }
70401 +
70402 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
70403 + error = -EACCES;
70404 + goto out_dput;
70405 + }
70406 +
70407 error = mnt_want_write(nd.path.mnt);
70408 if (error)
70409 goto out_dput;
70410 @@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
70411 }
70412 out_drop_write:
70413 mnt_drop_write(nd.path.mnt);
70414 +
70415 + if (!error)
70416 + gr_handle_create(dentry, nd.path.mnt);
70417 out_dput:
70418 dput(dentry);
70419 out_unlock:
70420 @@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
70421 if (IS_ERR(dentry))
70422 goto out_unlock;
70423
70424 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
70425 + error = -EACCES;
70426 + goto out_dput;
70427 + }
70428 +
70429 if (!IS_POSIXACL(nd.path.dentry->d_inode))
70430 mode &= ~current_umask();
70431 error = mnt_want_write(nd.path.mnt);
70432 @@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
70433 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
70434 out_drop_write:
70435 mnt_drop_write(nd.path.mnt);
70436 +
70437 + if (!error)
70438 + gr_handle_create(dentry, nd.path.mnt);
70439 +
70440 out_dput:
70441 dput(dentry);
70442 out_unlock:
70443 @@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
70444 char * name;
70445 struct dentry *dentry;
70446 struct nameidata nd;
70447 + ino_t saved_ino = 0;
70448 + dev_t saved_dev = 0;
70449
70450 error = user_path_parent(dfd, pathname, &nd, &name);
70451 if (error)
70452 @@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
70453 error = PTR_ERR(dentry);
70454 if (IS_ERR(dentry))
70455 goto exit2;
70456 +
70457 + if (dentry->d_inode != NULL) {
70458 + saved_ino = dentry->d_inode->i_ino;
70459 + saved_dev = gr_get_dev_from_dentry(dentry);
70460 +
70461 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
70462 + error = -EACCES;
70463 + goto exit3;
70464 + }
70465 + }
70466 +
70467 error = mnt_want_write(nd.path.mnt);
70468 if (error)
70469 goto exit3;
70470 @@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
70471 if (error)
70472 goto exit4;
70473 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
70474 + if (!error && (saved_dev || saved_ino))
70475 + gr_handle_delete(saved_ino, saved_dev);
70476 exit4:
70477 mnt_drop_write(nd.path.mnt);
70478 exit3:
70479 @@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
70480 struct dentry *dentry;
70481 struct nameidata nd;
70482 struct inode *inode = NULL;
70483 + ino_t saved_ino = 0;
70484 + dev_t saved_dev = 0;
70485
70486 error = user_path_parent(dfd, pathname, &nd, &name);
70487 if (error)
70488 @@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
70489 if (nd.last.name[nd.last.len])
70490 goto slashes;
70491 inode = dentry->d_inode;
70492 - if (inode)
70493 + if (inode) {
70494 + if (inode->i_nlink <= 1) {
70495 + saved_ino = inode->i_ino;
70496 + saved_dev = gr_get_dev_from_dentry(dentry);
70497 + }
70498 +
70499 atomic_inc(&inode->i_count);
70500 +
70501 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
70502 + error = -EACCES;
70503 + goto exit2;
70504 + }
70505 + }
70506 error = mnt_want_write(nd.path.mnt);
70507 if (error)
70508 goto exit2;
70509 @@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
70510 if (error)
70511 goto exit3;
70512 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
70513 + if (!error && (saved_ino || saved_dev))
70514 + gr_handle_delete(saved_ino, saved_dev);
70515 exit3:
70516 mnt_drop_write(nd.path.mnt);
70517 exit2:
70518 @@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
70519 if (IS_ERR(dentry))
70520 goto out_unlock;
70521
70522 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
70523 + error = -EACCES;
70524 + goto out_dput;
70525 + }
70526 +
70527 error = mnt_want_write(nd.path.mnt);
70528 if (error)
70529 goto out_dput;
70530 @@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
70531 if (error)
70532 goto out_drop_write;
70533 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
70534 + if (!error)
70535 + gr_handle_create(dentry, nd.path.mnt);
70536 out_drop_write:
70537 mnt_drop_write(nd.path.mnt);
70538 out_dput:
70539 @@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
70540 error = PTR_ERR(new_dentry);
70541 if (IS_ERR(new_dentry))
70542 goto out_unlock;
70543 +
70544 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
70545 + old_path.dentry->d_inode,
70546 + old_path.dentry->d_inode->i_mode, to)) {
70547 + error = -EACCES;
70548 + goto out_dput;
70549 + }
70550 +
70551 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
70552 + old_path.dentry, old_path.mnt, to)) {
70553 + error = -EACCES;
70554 + goto out_dput;
70555 + }
70556 +
70557 error = mnt_want_write(nd.path.mnt);
70558 if (error)
70559 goto out_dput;
70560 @@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
70561 if (error)
70562 goto out_drop_write;
70563 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
70564 + if (!error)
70565 + gr_handle_create(new_dentry, nd.path.mnt);
70566 out_drop_write:
70567 mnt_drop_write(nd.path.mnt);
70568 out_dput:
70569 @@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
70570 char *to;
70571 int error;
70572
70573 + pax_track_stack();
70574 +
70575 error = user_path_parent(olddfd, oldname, &oldnd, &from);
70576 if (error)
70577 goto exit;
70578 @@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
70579 if (new_dentry == trap)
70580 goto exit5;
70581
70582 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
70583 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
70584 + to);
70585 + if (error)
70586 + goto exit5;
70587 +
70588 error = mnt_want_write(oldnd.path.mnt);
70589 if (error)
70590 goto exit5;
70591 @@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
70592 goto exit6;
70593 error = vfs_rename(old_dir->d_inode, old_dentry,
70594 new_dir->d_inode, new_dentry);
70595 + if (!error)
70596 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
70597 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
70598 exit6:
70599 mnt_drop_write(oldnd.path.mnt);
70600 exit5:
70601 @@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
70602
70603 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
70604 {
70605 + char tmpbuf[64];
70606 + const char *newlink;
70607 int len;
70608
70609 len = PTR_ERR(link);
70610 @@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
70611 len = strlen(link);
70612 if (len > (unsigned) buflen)
70613 len = buflen;
70614 - if (copy_to_user(buffer, link, len))
70615 +
70616 + if (len < sizeof(tmpbuf)) {
70617 + memcpy(tmpbuf, link, len);
70618 + newlink = tmpbuf;
70619 + } else
70620 + newlink = link;
70621 +
70622 + if (copy_to_user(buffer, newlink, len))
70623 len = -EFAULT;
70624 out:
70625 return len;
70626 diff --git a/fs/namespace.c b/fs/namespace.c
70627 index 2beb0fb..11a95a5 100644
70628 --- a/fs/namespace.c
70629 +++ b/fs/namespace.c
70630 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
70631 if (!(sb->s_flags & MS_RDONLY))
70632 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
70633 up_write(&sb->s_umount);
70634 +
70635 + gr_log_remount(mnt->mnt_devname, retval);
70636 +
70637 return retval;
70638 }
70639
70640 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
70641 security_sb_umount_busy(mnt);
70642 up_write(&namespace_sem);
70643 release_mounts(&umount_list);
70644 +
70645 + gr_log_unmount(mnt->mnt_devname, retval);
70646 +
70647 return retval;
70648 }
70649
70650 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
70651 if (retval)
70652 goto dput_out;
70653
70654 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
70655 + retval = -EPERM;
70656 + goto dput_out;
70657 + }
70658 +
70659 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
70660 + retval = -EPERM;
70661 + goto dput_out;
70662 + }
70663 +
70664 if (flags & MS_REMOUNT)
70665 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
70666 data_page);
70667 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
70668 dev_name, data_page);
70669 dput_out:
70670 path_put(&path);
70671 +
70672 + gr_log_mount(dev_name, dir_name, retval);
70673 +
70674 return retval;
70675 }
70676
70677 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
70678 goto out1;
70679 }
70680
70681 + if (gr_handle_chroot_pivot()) {
70682 + error = -EPERM;
70683 + path_put(&old);
70684 + goto out1;
70685 + }
70686 +
70687 read_lock(&current->fs->lock);
70688 root = current->fs->root;
70689 path_get(&current->fs->root);
70690 diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
70691 index b8b5b30..2bd9ccb 100644
70692 --- a/fs/ncpfs/dir.c
70693 +++ b/fs/ncpfs/dir.c
70694 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
70695 int res, val = 0, len;
70696 __u8 __name[NCP_MAXPATHLEN + 1];
70697
70698 + pax_track_stack();
70699 +
70700 parent = dget_parent(dentry);
70701 dir = parent->d_inode;
70702
70703 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
70704 int error, res, len;
70705 __u8 __name[NCP_MAXPATHLEN + 1];
70706
70707 + pax_track_stack();
70708 +
70709 lock_kernel();
70710 error = -EIO;
70711 if (!ncp_conn_valid(server))
70712 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
70713 int error, result, len;
70714 int opmode;
70715 __u8 __name[NCP_MAXPATHLEN + 1];
70716 -
70717 +
70718 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
70719 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
70720
70721 + pax_track_stack();
70722 +
70723 error = -EIO;
70724 lock_kernel();
70725 if (!ncp_conn_valid(server))
70726 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
70727 int error, len;
70728 __u8 __name[NCP_MAXPATHLEN + 1];
70729
70730 + pax_track_stack();
70731 +
70732 DPRINTK("ncp_mkdir: making %s/%s\n",
70733 dentry->d_parent->d_name.name, dentry->d_name.name);
70734
70735 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
70736 if (!ncp_conn_valid(server))
70737 goto out;
70738
70739 + pax_track_stack();
70740 +
70741 ncp_age_dentry(server, dentry);
70742 len = sizeof(__name);
70743 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
70744 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
70745 int old_len, new_len;
70746 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
70747
70748 + pax_track_stack();
70749 +
70750 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
70751 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
70752 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
70753 diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
70754 index cf98da1..da890a9 100644
70755 --- a/fs/ncpfs/inode.c
70756 +++ b/fs/ncpfs/inode.c
70757 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
70758 #endif
70759 struct ncp_entry_info finfo;
70760
70761 + pax_track_stack();
70762 +
70763 data.wdog_pid = NULL;
70764 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
70765 if (!server)
70766 diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
70767 index 2441d1a..96882c1 100644
70768 --- a/fs/ncpfs/ncplib_kernel.h
70769 +++ b/fs/ncpfs/ncplib_kernel.h
70770 @@ -131,7 +131,7 @@ static inline int ncp_is_nfs_extras(struct ncp_server* server, unsigned int voln
70771 int ncp__io2vol(struct ncp_server *, unsigned char *, unsigned int *,
70772 const unsigned char *, unsigned int, int);
70773 int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
70774 - const unsigned char *, unsigned int, int);
70775 + const unsigned char *, unsigned int, int) __size_overflow(5);
70776
70777 #define NCP_ESC ':'
70778 #define NCP_IO_TABLE(dentry) (NCP_SERVER((dentry)->d_inode)->nls_io)
70779 @@ -147,7 +147,7 @@ int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
70780 int ncp__io2vol(unsigned char *, unsigned int *,
70781 const unsigned char *, unsigned int, int);
70782 int ncp__vol2io(unsigned char *, unsigned int *,
70783 - const unsigned char *, unsigned int, int);
70784 + const unsigned char *, unsigned int, int) __size_overflow(5);
70785
70786 #define NCP_IO_TABLE(dentry) NULL
70787 #define ncp_tolower(t, c) tolower(c)
70788 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
70789 index bfaef7b..e9d03ca 100644
70790 --- a/fs/nfs/inode.c
70791 +++ b/fs/nfs/inode.c
70792 @@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
70793 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
70794 nfsi->attrtimeo_timestamp = jiffies;
70795
70796 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
70797 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
70798 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
70799 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
70800 else
70801 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
70802 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
70803 }
70804
70805 -static atomic_long_t nfs_attr_generation_counter;
70806 +static atomic_long_unchecked_t nfs_attr_generation_counter;
70807
70808 static unsigned long nfs_read_attr_generation_counter(void)
70809 {
70810 - return atomic_long_read(&nfs_attr_generation_counter);
70811 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
70812 }
70813
70814 unsigned long nfs_inc_attr_generation_counter(void)
70815 {
70816 - return atomic_long_inc_return(&nfs_attr_generation_counter);
70817 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
70818 }
70819
70820 void nfs_fattr_init(struct nfs_fattr *fattr)
70821 diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
70822 index cc2f505..f6a236f 100644
70823 --- a/fs/nfsd/lockd.c
70824 +++ b/fs/nfsd/lockd.c
70825 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
70826 fput(filp);
70827 }
70828
70829 -static struct nlmsvc_binding nfsd_nlm_ops = {
70830 +static const struct nlmsvc_binding nfsd_nlm_ops = {
70831 .fopen = nlm_fopen, /* open file for locking */
70832 .fclose = nlm_fclose, /* close file */
70833 };
70834 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
70835 index cfc3391..dcc083a 100644
70836 --- a/fs/nfsd/nfs4state.c
70837 +++ b/fs/nfsd/nfs4state.c
70838 @@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
70839 unsigned int cmd;
70840 int err;
70841
70842 + pax_track_stack();
70843 +
70844 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
70845 (long long) lock->lk_offset,
70846 (long long) lock->lk_length);
70847 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
70848 index 4a82a96..0d5fb49 100644
70849 --- a/fs/nfsd/nfs4xdr.c
70850 +++ b/fs/nfsd/nfs4xdr.c
70851 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
70852 struct nfsd4_compoundres *resp = rqstp->rq_resp;
70853 u32 minorversion = resp->cstate.minorversion;
70854
70855 + pax_track_stack();
70856 +
70857 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
70858 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
70859 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
70860 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
70861 index 2e09588..596421d 100644
70862 --- a/fs/nfsd/vfs.c
70863 +++ b/fs/nfsd/vfs.c
70864 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
70865 } else {
70866 oldfs = get_fs();
70867 set_fs(KERNEL_DS);
70868 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
70869 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
70870 set_fs(oldfs);
70871 }
70872
70873 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
70874
70875 /* Write the data. */
70876 oldfs = get_fs(); set_fs(KERNEL_DS);
70877 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
70878 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
70879 set_fs(oldfs);
70880 if (host_err < 0)
70881 goto out_nfserr;
70882 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
70883 */
70884
70885 oldfs = get_fs(); set_fs(KERNEL_DS);
70886 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
70887 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
70888 set_fs(oldfs);
70889
70890 if (host_err < 0)
70891 diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
70892 index f6af760..d0adf34 100644
70893 --- a/fs/nilfs2/ioctl.c
70894 +++ b/fs/nilfs2/ioctl.c
70895 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
70896 unsigned int cmd, void __user *argp)
70897 {
70898 struct nilfs_argv argv[5];
70899 - const static size_t argsz[5] = {
70900 + static const size_t argsz[5] = {
70901 sizeof(struct nilfs_vdesc),
70902 sizeof(struct nilfs_period),
70903 sizeof(__u64),
70904 @@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
70905 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
70906 goto out_free;
70907
70908 + if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
70909 + goto out_free;
70910 +
70911 len = argv[n].v_size * argv[n].v_nmembs;
70912 base = (void __user *)(unsigned long)argv[n].v_base;
70913 if (len == 0) {
70914 diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
70915 index ad391a8..149a8a1 100644
70916 --- a/fs/nilfs2/the_nilfs.c
70917 +++ b/fs/nilfs2/the_nilfs.c
70918 @@ -478,6 +478,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
70919 brelse(sbh[1]);
70920 sbh[1] = NULL;
70921 sbp[1] = NULL;
70922 + valid[1] = 0;
70923 swp = 0;
70924 }
70925 if (!valid[swp]) {
70926 diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
70927 index 7e54e52..9337248 100644
70928 --- a/fs/notify/dnotify/dnotify.c
70929 +++ b/fs/notify/dnotify/dnotify.c
70930 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
70931 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
70932 }
70933
70934 -static struct fsnotify_ops dnotify_fsnotify_ops = {
70935 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
70936 .handle_event = dnotify_handle_event,
70937 .should_send_event = dnotify_should_send_event,
70938 .free_group_priv = NULL,
70939 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
70940 index b8bf53b..c518688 100644
70941 --- a/fs/notify/notification.c
70942 +++ b/fs/notify/notification.c
70943 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
70944 * get set to 0 so it will never get 'freed'
70945 */
70946 static struct fsnotify_event q_overflow_event;
70947 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
70948 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
70949
70950 /**
70951 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
70952 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
70953 */
70954 u32 fsnotify_get_cookie(void)
70955 {
70956 - return atomic_inc_return(&fsnotify_sync_cookie);
70957 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
70958 }
70959 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
70960
70961 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
70962 index 5a9e344..0f8cd28 100644
70963 --- a/fs/ntfs/dir.c
70964 +++ b/fs/ntfs/dir.c
70965 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
70966 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
70967 ~(s64)(ndir->itype.index.block_size - 1)));
70968 /* Bounds checks. */
70969 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
70970 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
70971 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
70972 "inode 0x%lx or driver bug.", vdir->i_ino);
70973 goto err_out;
70974 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
70975 index 663c0e3..b6868e9 100644
70976 --- a/fs/ntfs/file.c
70977 +++ b/fs/ntfs/file.c
70978 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
70979 #endif /* NTFS_RW */
70980 };
70981
70982 -const struct file_operations ntfs_empty_file_ops = {};
70983 +const struct file_operations ntfs_empty_file_ops __read_only;
70984
70985 -const struct inode_operations ntfs_empty_inode_ops = {};
70986 +const struct inode_operations ntfs_empty_inode_ops __read_only;
70987 diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
70988 index 1cd2934..880b5d2 100644
70989 --- a/fs/ocfs2/cluster/masklog.c
70990 +++ b/fs/ocfs2/cluster/masklog.c
70991 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
70992 return mlog_mask_store(mlog_attr->mask, buf, count);
70993 }
70994
70995 -static struct sysfs_ops mlog_attr_ops = {
70996 +static const struct sysfs_ops mlog_attr_ops = {
70997 .show = mlog_show,
70998 .store = mlog_store,
70999 };
71000 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
71001 index ac10f83..2cd2607 100644
71002 --- a/fs/ocfs2/localalloc.c
71003 +++ b/fs/ocfs2/localalloc.c
71004 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
71005 goto bail;
71006 }
71007
71008 - atomic_inc(&osb->alloc_stats.moves);
71009 + atomic_inc_unchecked(&osb->alloc_stats.moves);
71010
71011 status = 0;
71012 bail:
71013 diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
71014 index f010b22..9f9ed34 100644
71015 --- a/fs/ocfs2/namei.c
71016 +++ b/fs/ocfs2/namei.c
71017 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
71018 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
71019 struct ocfs2_dir_lookup_result target_insert = { NULL, };
71020
71021 + pax_track_stack();
71022 +
71023 /* At some point it might be nice to break this function up a
71024 * bit. */
71025
71026 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
71027 index d963d86..914cfbd 100644
71028 --- a/fs/ocfs2/ocfs2.h
71029 +++ b/fs/ocfs2/ocfs2.h
71030 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
71031
71032 struct ocfs2_alloc_stats
71033 {
71034 - atomic_t moves;
71035 - atomic_t local_data;
71036 - atomic_t bitmap_data;
71037 - atomic_t bg_allocs;
71038 - atomic_t bg_extends;
71039 + atomic_unchecked_t moves;
71040 + atomic_unchecked_t local_data;
71041 + atomic_unchecked_t bitmap_data;
71042 + atomic_unchecked_t bg_allocs;
71043 + atomic_unchecked_t bg_extends;
71044 };
71045
71046 enum ocfs2_local_alloc_state
71047 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
71048 index 79b5dac..d322952 100644
71049 --- a/fs/ocfs2/suballoc.c
71050 +++ b/fs/ocfs2/suballoc.c
71051 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
71052 mlog_errno(status);
71053 goto bail;
71054 }
71055 - atomic_inc(&osb->alloc_stats.bg_extends);
71056 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
71057
71058 /* You should never ask for this much metadata */
71059 BUG_ON(bits_wanted >
71060 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
71061 mlog_errno(status);
71062 goto bail;
71063 }
71064 - atomic_inc(&osb->alloc_stats.bg_allocs);
71065 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
71066
71067 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
71068 ac->ac_bits_given += (*num_bits);
71069 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
71070 mlog_errno(status);
71071 goto bail;
71072 }
71073 - atomic_inc(&osb->alloc_stats.bg_allocs);
71074 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
71075
71076 BUG_ON(num_bits != 1);
71077
71078 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
71079 cluster_start,
71080 num_clusters);
71081 if (!status)
71082 - atomic_inc(&osb->alloc_stats.local_data);
71083 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
71084 } else {
71085 if (min_clusters > (osb->bitmap_cpg - 1)) {
71086 /* The only paths asking for contiguousness
71087 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
71088 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
71089 bg_blkno,
71090 bg_bit_off);
71091 - atomic_inc(&osb->alloc_stats.bitmap_data);
71092 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
71093 }
71094 }
71095 if (status < 0) {
71096 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
71097 index 9f55be4..a3f8048 100644
71098 --- a/fs/ocfs2/super.c
71099 +++ b/fs/ocfs2/super.c
71100 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
71101 "%10s => GlobalAllocs: %d LocalAllocs: %d "
71102 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
71103 "Stats",
71104 - atomic_read(&osb->alloc_stats.bitmap_data),
71105 - atomic_read(&osb->alloc_stats.local_data),
71106 - atomic_read(&osb->alloc_stats.bg_allocs),
71107 - atomic_read(&osb->alloc_stats.moves),
71108 - atomic_read(&osb->alloc_stats.bg_extends));
71109 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
71110 + atomic_read_unchecked(&osb->alloc_stats.local_data),
71111 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
71112 + atomic_read_unchecked(&osb->alloc_stats.moves),
71113 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
71114
71115 out += snprintf(buf + out, len - out,
71116 "%10s => State: %u Descriptor: %llu Size: %u bits "
71117 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
71118 spin_lock_init(&osb->osb_xattr_lock);
71119 ocfs2_init_inode_steal_slot(osb);
71120
71121 - atomic_set(&osb->alloc_stats.moves, 0);
71122 - atomic_set(&osb->alloc_stats.local_data, 0);
71123 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
71124 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
71125 - atomic_set(&osb->alloc_stats.bg_extends, 0);
71126 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
71127 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
71128 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
71129 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
71130 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
71131
71132 /* Copy the blockcheck stats from the superblock probe */
71133 osb->osb_ecc_stats = *stats;
71134 diff --git a/fs/open.c b/fs/open.c
71135 index 4f01e06..2a8057a 100644
71136 --- a/fs/open.c
71137 +++ b/fs/open.c
71138 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
71139 error = locks_verify_truncate(inode, NULL, length);
71140 if (!error)
71141 error = security_path_truncate(&path, length, 0);
71142 +
71143 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
71144 + error = -EACCES;
71145 +
71146 if (!error) {
71147 vfs_dq_init(inode);
71148 error = do_truncate(path.dentry, length, 0, NULL);
71149 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
71150 if (__mnt_is_readonly(path.mnt))
71151 res = -EROFS;
71152
71153 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
71154 + res = -EACCES;
71155 +
71156 out_path_release:
71157 path_put(&path);
71158 out:
71159 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
71160 if (error)
71161 goto dput_and_out;
71162
71163 + gr_log_chdir(path.dentry, path.mnt);
71164 +
71165 set_fs_pwd(current->fs, &path);
71166
71167 dput_and_out:
71168 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
71169 goto out_putf;
71170
71171 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
71172 +
71173 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
71174 + error = -EPERM;
71175 +
71176 + if (!error)
71177 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
71178 +
71179 if (!error)
71180 set_fs_pwd(current->fs, &file->f_path);
71181 out_putf:
71182 @@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
71183 if (!capable(CAP_SYS_CHROOT))
71184 goto dput_and_out;
71185
71186 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
71187 + goto dput_and_out;
71188 +
71189 set_fs_root(current->fs, &path);
71190 +
71191 + gr_handle_chroot_chdir(&path);
71192 +
71193 error = 0;
71194 dput_and_out:
71195 path_put(&path);
71196 @@ -596,66 +618,57 @@ out:
71197 return error;
71198 }
71199
71200 +static int chmod_common(struct path *path, umode_t mode)
71201 +{
71202 + struct inode *inode = path->dentry->d_inode;
71203 + struct iattr newattrs;
71204 + int error;
71205 +
71206 + error = mnt_want_write(path->mnt);
71207 + if (error)
71208 + return error;
71209 + mutex_lock(&inode->i_mutex);
71210 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
71211 + error = -EACCES;
71212 + goto out_unlock;
71213 + }
71214 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
71215 + error = -EPERM;
71216 + goto out_unlock;
71217 + }
71218 + newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
71219 + newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
71220 + error = notify_change(path->dentry, &newattrs);
71221 +out_unlock:
71222 + mutex_unlock(&inode->i_mutex);
71223 + mnt_drop_write(path->mnt);
71224 + return error;
71225 +}
71226 +
71227 SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
71228 {
71229 - struct inode * inode;
71230 - struct dentry * dentry;
71231 struct file * file;
71232 int err = -EBADF;
71233 - struct iattr newattrs;
71234
71235 file = fget(fd);
71236 - if (!file)
71237 - goto out;
71238 -
71239 - dentry = file->f_path.dentry;
71240 - inode = dentry->d_inode;
71241 -
71242 - audit_inode(NULL, dentry);
71243 -
71244 - err = mnt_want_write_file(file);
71245 - if (err)
71246 - goto out_putf;
71247 - mutex_lock(&inode->i_mutex);
71248 - if (mode == (mode_t) -1)
71249 - mode = inode->i_mode;
71250 - newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
71251 - newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
71252 - err = notify_change(dentry, &newattrs);
71253 - mutex_unlock(&inode->i_mutex);
71254 - mnt_drop_write(file->f_path.mnt);
71255 -out_putf:
71256 - fput(file);
71257 -out:
71258 + if (file) {
71259 + audit_inode(NULL, file->f_path.dentry);
71260 + err = chmod_common(&file->f_path, mode);
71261 + fput(file);
71262 + }
71263 return err;
71264 }
71265
71266 SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
71267 {
71268 struct path path;
71269 - struct inode *inode;
71270 int error;
71271 - struct iattr newattrs;
71272
71273 error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
71274 - if (error)
71275 - goto out;
71276 - inode = path.dentry->d_inode;
71277 -
71278 - error = mnt_want_write(path.mnt);
71279 - if (error)
71280 - goto dput_and_out;
71281 - mutex_lock(&inode->i_mutex);
71282 - if (mode == (mode_t) -1)
71283 - mode = inode->i_mode;
71284 - newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
71285 - newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
71286 - error = notify_change(path.dentry, &newattrs);
71287 - mutex_unlock(&inode->i_mutex);
71288 - mnt_drop_write(path.mnt);
71289 -dput_and_out:
71290 - path_put(&path);
71291 -out:
71292 + if (!error) {
71293 + error = chmod_common(&path, mode);
71294 + path_put(&path);
71295 + }
71296 return error;
71297 }
71298
71299 @@ -664,12 +677,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
71300 return sys_fchmodat(AT_FDCWD, filename, mode);
71301 }
71302
71303 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
71304 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
71305 {
71306 struct inode *inode = dentry->d_inode;
71307 int error;
71308 struct iattr newattrs;
71309
71310 + if (!gr_acl_handle_chown(dentry, mnt))
71311 + return -EACCES;
71312 +
71313 newattrs.ia_valid = ATTR_CTIME;
71314 if (user != (uid_t) -1) {
71315 newattrs.ia_valid |= ATTR_UID;
71316 @@ -700,7 +716,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
71317 error = mnt_want_write(path.mnt);
71318 if (error)
71319 goto out_release;
71320 - error = chown_common(path.dentry, user, group);
71321 + error = chown_common(path.dentry, user, group, path.mnt);
71322 mnt_drop_write(path.mnt);
71323 out_release:
71324 path_put(&path);
71325 @@ -725,7 +741,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
71326 error = mnt_want_write(path.mnt);
71327 if (error)
71328 goto out_release;
71329 - error = chown_common(path.dentry, user, group);
71330 + error = chown_common(path.dentry, user, group, path.mnt);
71331 mnt_drop_write(path.mnt);
71332 out_release:
71333 path_put(&path);
71334 @@ -744,7 +760,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
71335 error = mnt_want_write(path.mnt);
71336 if (error)
71337 goto out_release;
71338 - error = chown_common(path.dentry, user, group);
71339 + error = chown_common(path.dentry, user, group, path.mnt);
71340 mnt_drop_write(path.mnt);
71341 out_release:
71342 path_put(&path);
71343 @@ -767,7 +783,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
71344 goto out_fput;
71345 dentry = file->f_path.dentry;
71346 audit_inode(NULL, dentry);
71347 - error = chown_common(dentry, user, group);
71348 + error = chown_common(dentry, user, group, file->f_path.mnt);
71349 mnt_drop_write(file->f_path.mnt);
71350 out_fput:
71351 fput(file);
71352 @@ -1036,7 +1052,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
71353 if (!IS_ERR(tmp)) {
71354 fd = get_unused_fd_flags(flags);
71355 if (fd >= 0) {
71356 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
71357 + struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
71358 if (IS_ERR(f)) {
71359 put_unused_fd(fd);
71360 fd = PTR_ERR(f);
71361 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
71362 index 6ab70f4..f4103d1 100644
71363 --- a/fs/partitions/efi.c
71364 +++ b/fs/partitions/efi.c
71365 @@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
71366 if (!bdev || !gpt)
71367 return NULL;
71368
71369 + if (!le32_to_cpu(gpt->num_partition_entries))
71370 + return NULL;
71371 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
71372 + if (!pte)
71373 + return NULL;
71374 +
71375 count = le32_to_cpu(gpt->num_partition_entries) *
71376 le32_to_cpu(gpt->sizeof_partition_entry);
71377 - if (!count)
71378 - return NULL;
71379 - pte = kzalloc(count, GFP_KERNEL);
71380 - if (!pte)
71381 - return NULL;
71382 -
71383 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
71384 (u8 *) pte,
71385 count) < count) {
71386 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
71387 index dd6efdb..3babc6c 100644
71388 --- a/fs/partitions/ldm.c
71389 +++ b/fs/partitions/ldm.c
71390 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
71391 ldm_error ("A VBLK claims to have %d parts.", num);
71392 return false;
71393 }
71394 +
71395 if (rec >= num) {
71396 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
71397 return false;
71398 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
71399 goto found;
71400 }
71401
71402 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
71403 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
71404 if (!f) {
71405 ldm_crit ("Out of memory.");
71406 return false;
71407 diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
71408 index 5765198..7f8e9e0 100644
71409 --- a/fs/partitions/mac.c
71410 +++ b/fs/partitions/mac.c
71411 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
71412 return 0; /* not a MacOS disk */
71413 }
71414 blocks_in_map = be32_to_cpu(part->map_count);
71415 - if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
71416 - put_dev_sector(sect);
71417 - return 0;
71418 - }
71419 printk(" [mac]");
71420 + if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
71421 + put_dev_sector(sect);
71422 + return 0;
71423 + }
71424 for (slot = 1; slot <= blocks_in_map; ++slot) {
71425 int pos = slot * secsize;
71426 put_dev_sector(sect);
71427 diff --git a/fs/pipe.c b/fs/pipe.c
71428 index d0cc080..8a6f211 100644
71429 --- a/fs/pipe.c
71430 +++ b/fs/pipe.c
71431 @@ -401,9 +401,9 @@ redo:
71432 }
71433 if (bufs) /* More to do? */
71434 continue;
71435 - if (!pipe->writers)
71436 + if (!atomic_read(&pipe->writers))
71437 break;
71438 - if (!pipe->waiting_writers) {
71439 + if (!atomic_read(&pipe->waiting_writers)) {
71440 /* syscall merging: Usually we must not sleep
71441 * if O_NONBLOCK is set, or if we got some data.
71442 * But if a writer sleeps in kernel space, then
71443 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
71444 mutex_lock(&inode->i_mutex);
71445 pipe = inode->i_pipe;
71446
71447 - if (!pipe->readers) {
71448 + if (!atomic_read(&pipe->readers)) {
71449 send_sig(SIGPIPE, current, 0);
71450 ret = -EPIPE;
71451 goto out;
71452 @@ -511,7 +511,7 @@ redo1:
71453 for (;;) {
71454 int bufs;
71455
71456 - if (!pipe->readers) {
71457 + if (!atomic_read(&pipe->readers)) {
71458 send_sig(SIGPIPE, current, 0);
71459 if (!ret)
71460 ret = -EPIPE;
71461 @@ -597,9 +597,9 @@ redo2:
71462 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
71463 do_wakeup = 0;
71464 }
71465 - pipe->waiting_writers++;
71466 + atomic_inc(&pipe->waiting_writers);
71467 pipe_wait(pipe);
71468 - pipe->waiting_writers--;
71469 + atomic_dec(&pipe->waiting_writers);
71470 }
71471 out:
71472 mutex_unlock(&inode->i_mutex);
71473 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
71474 mask = 0;
71475 if (filp->f_mode & FMODE_READ) {
71476 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
71477 - if (!pipe->writers && filp->f_version != pipe->w_counter)
71478 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
71479 mask |= POLLHUP;
71480 }
71481
71482 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
71483 * Most Unices do not set POLLERR for FIFOs but on Linux they
71484 * behave exactly like pipes for poll().
71485 */
71486 - if (!pipe->readers)
71487 + if (!atomic_read(&pipe->readers))
71488 mask |= POLLERR;
71489 }
71490
71491 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
71492
71493 mutex_lock(&inode->i_mutex);
71494 pipe = inode->i_pipe;
71495 - pipe->readers -= decr;
71496 - pipe->writers -= decw;
71497 + atomic_sub(decr, &pipe->readers);
71498 + atomic_sub(decw, &pipe->writers);
71499
71500 - if (!pipe->readers && !pipe->writers) {
71501 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
71502 free_pipe_info(inode);
71503 } else {
71504 wake_up_interruptible_sync(&pipe->wait);
71505 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
71506
71507 if (inode->i_pipe) {
71508 ret = 0;
71509 - inode->i_pipe->readers++;
71510 + atomic_inc(&inode->i_pipe->readers);
71511 }
71512
71513 mutex_unlock(&inode->i_mutex);
71514 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
71515
71516 if (inode->i_pipe) {
71517 ret = 0;
71518 - inode->i_pipe->writers++;
71519 + atomic_inc(&inode->i_pipe->writers);
71520 }
71521
71522 mutex_unlock(&inode->i_mutex);
71523 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
71524 if (inode->i_pipe) {
71525 ret = 0;
71526 if (filp->f_mode & FMODE_READ)
71527 - inode->i_pipe->readers++;
71528 + atomic_inc(&inode->i_pipe->readers);
71529 if (filp->f_mode & FMODE_WRITE)
71530 - inode->i_pipe->writers++;
71531 + atomic_inc(&inode->i_pipe->writers);
71532 }
71533
71534 mutex_unlock(&inode->i_mutex);
71535 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
71536 inode->i_pipe = NULL;
71537 }
71538
71539 -static struct vfsmount *pipe_mnt __read_mostly;
71540 +struct vfsmount *pipe_mnt __read_mostly;
71541 static int pipefs_delete_dentry(struct dentry *dentry)
71542 {
71543 /*
71544 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
71545 goto fail_iput;
71546 inode->i_pipe = pipe;
71547
71548 - pipe->readers = pipe->writers = 1;
71549 + atomic_set(&pipe->readers, 1);
71550 + atomic_set(&pipe->writers, 1);
71551 inode->i_fop = &rdwr_pipefifo_fops;
71552
71553 /*
71554 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
71555 index 50f8f06..c5755df 100644
71556 --- a/fs/proc/Kconfig
71557 +++ b/fs/proc/Kconfig
71558 @@ -30,12 +30,12 @@ config PROC_FS
71559
71560 config PROC_KCORE
71561 bool "/proc/kcore support" if !ARM
71562 - depends on PROC_FS && MMU
71563 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
71564
71565 config PROC_VMCORE
71566 bool "/proc/vmcore support (EXPERIMENTAL)"
71567 - depends on PROC_FS && CRASH_DUMP
71568 - default y
71569 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
71570 + default n
71571 help
71572 Exports the dump image of crashed kernel in ELF format.
71573
71574 @@ -59,8 +59,8 @@ config PROC_SYSCTL
71575 limited in memory.
71576
71577 config PROC_PAGE_MONITOR
71578 - default y
71579 - depends on PROC_FS && MMU
71580 + default n
71581 + depends on PROC_FS && MMU && !GRKERNSEC
71582 bool "Enable /proc page monitoring" if EMBEDDED
71583 help
71584 Various /proc files exist to monitor process memory utilization:
71585 diff --git a/fs/proc/array.c b/fs/proc/array.c
71586 index c5ef152..28c94f7 100644
71587 --- a/fs/proc/array.c
71588 +++ b/fs/proc/array.c
71589 @@ -60,6 +60,7 @@
71590 #include <linux/tty.h>
71591 #include <linux/string.h>
71592 #include <linux/mman.h>
71593 +#include <linux/grsecurity.h>
71594 #include <linux/proc_fs.h>
71595 #include <linux/ioport.h>
71596 #include <linux/uaccess.h>
71597 @@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
71598 p->nivcsw);
71599 }
71600
71601 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71602 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
71603 +{
71604 + if (p->mm)
71605 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
71606 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
71607 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
71608 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
71609 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
71610 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
71611 + else
71612 + seq_printf(m, "PaX:\t-----\n");
71613 +}
71614 +#endif
71615 +
71616 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
71617 struct pid *pid, struct task_struct *task)
71618 {
71619 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
71620 task_cap(m, task);
71621 cpuset_task_status_allowed(m, task);
71622 task_context_switch_counts(m, task);
71623 +
71624 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71625 + task_pax(m, task);
71626 +#endif
71627 +
71628 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
71629 + task_grsec_rbac(m, task);
71630 +#endif
71631 +
71632 return 0;
71633 }
71634
71635 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71636 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
71637 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
71638 + _mm->pax_flags & MF_PAX_SEGMEXEC))
71639 +#endif
71640 +
71641 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71642 struct pid *pid, struct task_struct *task, int whole)
71643 {
71644 @@ -358,9 +389,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71645 cputime_t cutime, cstime, utime, stime;
71646 cputime_t cgtime, gtime;
71647 unsigned long rsslim = 0;
71648 - char tcomm[sizeof(task->comm)];
71649 + char tcomm[sizeof(task->comm)] = { 0 };
71650 unsigned long flags;
71651
71652 + pax_track_stack();
71653 +
71654 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71655 + if (current->exec_id != m->exec_id) {
71656 + gr_log_badprocpid("stat");
71657 + return 0;
71658 + }
71659 +#endif
71660 +
71661 state = *get_task_state(task);
71662 vsize = eip = esp = 0;
71663 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
71664 @@ -433,6 +473,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71665 gtime = task_gtime(task);
71666 }
71667
71668 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71669 + if (PAX_RAND_FLAGS(mm)) {
71670 + eip = 0;
71671 + esp = 0;
71672 + wchan = 0;
71673 + }
71674 +#endif
71675 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71676 + wchan = 0;
71677 + eip =0;
71678 + esp =0;
71679 +#endif
71680 +
71681 /* scale priority and nice values from timeslices to -20..20 */
71682 /* to make it look like a "normal" Unix priority/nice value */
71683 priority = task_prio(task);
71684 @@ -473,9 +526,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71685 vsize,
71686 mm ? get_mm_rss(mm) : 0,
71687 rsslim,
71688 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71689 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
71690 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
71691 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
71692 +#else
71693 mm ? (permitted ? mm->start_code : 1) : 0,
71694 mm ? (permitted ? mm->end_code : 1) : 0,
71695 (permitted && mm) ? mm->start_stack : 0,
71696 +#endif
71697 esp,
71698 eip,
71699 /* The signal information here is obsolete.
71700 @@ -517,8 +576,16 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
71701 struct pid *pid, struct task_struct *task)
71702 {
71703 int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
71704 - struct mm_struct *mm = get_task_mm(task);
71705 + struct mm_struct *mm;
71706
71707 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71708 + if (current->exec_id != m->exec_id) {
71709 + gr_log_badprocpid("statm");
71710 + return 0;
71711 + }
71712 +#endif
71713 +
71714 + mm = get_task_mm(task);
71715 if (mm) {
71716 size = task_statm(mm, &shared, &text, &data, &resident);
71717 mmput(mm);
71718 @@ -528,3 +595,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
71719
71720 return 0;
71721 }
71722 +
71723 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
71724 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
71725 +{
71726 + u32 curr_ip = 0;
71727 + unsigned long flags;
71728 +
71729 + if (lock_task_sighand(task, &flags)) {
71730 + curr_ip = task->signal->curr_ip;
71731 + unlock_task_sighand(task, &flags);
71732 + }
71733 +
71734 + return sprintf(buffer, "%pI4\n", &curr_ip);
71735 +}
71736 +#endif
71737 diff --git a/fs/proc/base.c b/fs/proc/base.c
71738 index 67f7dc0..a86ad9a 100644
71739 --- a/fs/proc/base.c
71740 +++ b/fs/proc/base.c
71741 @@ -102,6 +102,22 @@ struct pid_entry {
71742 union proc_op op;
71743 };
71744
71745 +struct getdents_callback {
71746 + struct linux_dirent __user * current_dir;
71747 + struct linux_dirent __user * previous;
71748 + struct file * file;
71749 + int count;
71750 + int error;
71751 +};
71752 +
71753 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
71754 + loff_t offset, u64 ino, unsigned int d_type)
71755 +{
71756 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
71757 + buf->error = -EINVAL;
71758 + return 0;
71759 +}
71760 +
71761 #define NOD(NAME, MODE, IOP, FOP, OP) { \
71762 .name = (NAME), \
71763 .len = sizeof(NAME) - 1, \
71764 @@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
71765 if (task == current)
71766 return 0;
71767
71768 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
71769 + return -EPERM;
71770 +
71771 /*
71772 * If current is actively ptrace'ing, and would also be
71773 * permitted to freshly attach with ptrace now, permit it.
71774 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
71775 if (!mm->arg_end)
71776 goto out_mm; /* Shh! No looking before we're done */
71777
71778 + if (gr_acl_handle_procpidmem(task))
71779 + goto out_mm;
71780 +
71781 len = mm->arg_end - mm->arg_start;
71782
71783 if (len > PAGE_SIZE)
71784 @@ -287,12 +309,28 @@ out:
71785 return res;
71786 }
71787
71788 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71789 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
71790 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
71791 + _mm->pax_flags & MF_PAX_SEGMEXEC))
71792 +#endif
71793 +
71794 static int proc_pid_auxv(struct task_struct *task, char *buffer)
71795 {
71796 int res = 0;
71797 struct mm_struct *mm = get_task_mm(task);
71798 if (mm) {
71799 unsigned int nwords = 0;
71800 +
71801 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71802 + /* allow if we're currently ptracing this task */
71803 + if (PAX_RAND_FLAGS(mm) &&
71804 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
71805 + mmput(mm);
71806 + return 0;
71807 + }
71808 +#endif
71809 +
71810 do {
71811 nwords += 2;
71812 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
71813 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
71814 }
71815
71816
71817 -#ifdef CONFIG_KALLSYMS
71818 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71819 /*
71820 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
71821 * Returns the resolved symbol. If that fails, simply return the address.
71822 @@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
71823 mutex_unlock(&task->cred_guard_mutex);
71824 }
71825
71826 -#ifdef CONFIG_STACKTRACE
71827 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71828
71829 #define MAX_STACK_TRACE_DEPTH 64
71830
71831 @@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
71832 return count;
71833 }
71834
71835 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
71836 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
71837 static int proc_pid_syscall(struct task_struct *task, char *buffer)
71838 {
71839 long nr;
71840 @@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
71841 /************************************************************************/
71842
71843 /* permission checks */
71844 -static int proc_fd_access_allowed(struct inode *inode)
71845 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
71846 {
71847 struct task_struct *task;
71848 int allowed = 0;
71849 @@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
71850 */
71851 task = get_proc_task(inode);
71852 if (task) {
71853 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
71854 + if (log)
71855 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
71856 + else
71857 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
71858 put_task_struct(task);
71859 }
71860 return allowed;
71861 @@ -806,9 +847,16 @@ static const struct file_operations proc_single_file_operations = {
71862 static int mem_open(struct inode* inode, struct file* file)
71863 {
71864 file->private_data = (void*)((long)current->self_exec_id);
71865 +
71866 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71867 + file->f_version = current->exec_id;
71868 +#endif
71869 +
71870 return 0;
71871 }
71872
71873 +static int task_dumpable(struct task_struct *task);
71874 +
71875 static ssize_t mem_read(struct file * file, char __user * buf,
71876 size_t count, loff_t *ppos)
71877 {
71878 @@ -818,6 +866,13 @@ static ssize_t mem_read(struct file * file, char __user * buf,
71879 int ret = -ESRCH;
71880 struct mm_struct *mm;
71881
71882 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71883 + if (file->f_version != current->exec_id) {
71884 + gr_log_badprocpid("mem");
71885 + return 0;
71886 + }
71887 +#endif
71888 +
71889 if (!task)
71890 goto out_no_task;
71891
71892 @@ -963,6 +1018,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
71893 if (!task)
71894 goto out_no_task;
71895
71896 + if (gr_acl_handle_procpidmem(task))
71897 + goto out;
71898 +
71899 if (!ptrace_may_access(task, PTRACE_MODE_READ))
71900 goto out;
71901
71902 @@ -1377,7 +1435,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
71903 path_put(&nd->path);
71904
71905 /* Are we allowed to snoop on the tasks file descriptors? */
71906 - if (!proc_fd_access_allowed(inode))
71907 + if (!proc_fd_access_allowed(inode,0))
71908 goto out;
71909
71910 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
71911 @@ -1417,8 +1475,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
71912 struct path path;
71913
71914 /* Are we allowed to snoop on the tasks file descriptors? */
71915 - if (!proc_fd_access_allowed(inode))
71916 - goto out;
71917 + /* logging this is needed for learning on chromium to work properly,
71918 + but we don't want to flood the logs from 'ps' which does a readlink
71919 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
71920 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
71921 + */
71922 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
71923 + if (!proc_fd_access_allowed(inode,0))
71924 + goto out;
71925 + } else {
71926 + if (!proc_fd_access_allowed(inode,1))
71927 + goto out;
71928 + }
71929
71930 error = PROC_I(inode)->op.proc_get_link(inode, &path);
71931 if (error)
71932 @@ -1483,7 +1551,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
71933 rcu_read_lock();
71934 cred = __task_cred(task);
71935 inode->i_uid = cred->euid;
71936 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71937 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
71938 +#else
71939 inode->i_gid = cred->egid;
71940 +#endif
71941 rcu_read_unlock();
71942 }
71943 security_task_to_inode(task, inode);
71944 @@ -1501,6 +1573,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
71945 struct inode *inode = dentry->d_inode;
71946 struct task_struct *task;
71947 const struct cred *cred;
71948 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71949 + const struct cred *tmpcred = current_cred();
71950 +#endif
71951
71952 generic_fillattr(inode, stat);
71953
71954 @@ -1508,13 +1583,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
71955 stat->uid = 0;
71956 stat->gid = 0;
71957 task = pid_task(proc_pid(inode), PIDTYPE_PID);
71958 +
71959 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
71960 + rcu_read_unlock();
71961 + return -ENOENT;
71962 + }
71963 +
71964 if (task) {
71965 + cred = __task_cred(task);
71966 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71967 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
71968 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71969 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
71970 +#endif
71971 + ) {
71972 +#endif
71973 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
71974 +#ifdef CONFIG_GRKERNSEC_PROC_USER
71975 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
71976 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71977 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
71978 +#endif
71979 task_dumpable(task)) {
71980 - cred = __task_cred(task);
71981 stat->uid = cred->euid;
71982 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71983 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
71984 +#else
71985 stat->gid = cred->egid;
71986 +#endif
71987 }
71988 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71989 + } else {
71990 + rcu_read_unlock();
71991 + return -ENOENT;
71992 + }
71993 +#endif
71994 }
71995 rcu_read_unlock();
71996 return 0;
71997 @@ -1545,11 +1648,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
71998
71999 if (task) {
72000 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
72001 +#ifdef CONFIG_GRKERNSEC_PROC_USER
72002 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
72003 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72004 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
72005 +#endif
72006 task_dumpable(task)) {
72007 rcu_read_lock();
72008 cred = __task_cred(task);
72009 inode->i_uid = cred->euid;
72010 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72011 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
72012 +#else
72013 inode->i_gid = cred->egid;
72014 +#endif
72015 rcu_read_unlock();
72016 } else {
72017 inode->i_uid = 0;
72018 @@ -1670,7 +1782,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
72019 int fd = proc_fd(inode);
72020
72021 if (task) {
72022 - files = get_files_struct(task);
72023 + if (!gr_acl_handle_procpidmem(task))
72024 + files = get_files_struct(task);
72025 put_task_struct(task);
72026 }
72027 if (files) {
72028 @@ -1922,12 +2035,22 @@ static const struct file_operations proc_fd_operations = {
72029 static int proc_fd_permission(struct inode *inode, int mask)
72030 {
72031 int rv;
72032 + struct task_struct *task;
72033
72034 rv = generic_permission(inode, mask, NULL);
72035 - if (rv == 0)
72036 - return 0;
72037 +
72038 if (task_pid(current) == proc_pid(inode))
72039 rv = 0;
72040 +
72041 + task = get_proc_task(inode);
72042 + if (task == NULL)
72043 + return rv;
72044 +
72045 + if (gr_acl_handle_procpidmem(task))
72046 + rv = -EACCES;
72047 +
72048 + put_task_struct(task);
72049 +
72050 return rv;
72051 }
72052
72053 @@ -2036,6 +2159,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
72054 if (!task)
72055 goto out_no_task;
72056
72057 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
72058 + goto out;
72059 +
72060 /*
72061 * Yes, it does not scale. And it should not. Don't add
72062 * new entries into /proc/<tgid>/ without very good reasons.
72063 @@ -2080,6 +2206,9 @@ static int proc_pident_readdir(struct file *filp,
72064 if (!task)
72065 goto out_no_task;
72066
72067 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
72068 + goto out;
72069 +
72070 ret = 0;
72071 i = filp->f_pos;
72072 switch (i) {
72073 @@ -2347,7 +2476,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
72074 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
72075 void *cookie)
72076 {
72077 - char *s = nd_get_link(nd);
72078 + const char *s = nd_get_link(nd);
72079 if (!IS_ERR(s))
72080 __putname(s);
72081 }
72082 @@ -2553,7 +2682,7 @@ static const struct pid_entry tgid_base_stuff[] = {
72083 #ifdef CONFIG_SCHED_DEBUG
72084 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
72085 #endif
72086 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
72087 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
72088 INF("syscall", S_IRUGO, proc_pid_syscall),
72089 #endif
72090 INF("cmdline", S_IRUGO, proc_pid_cmdline),
72091 @@ -2578,10 +2707,10 @@ static const struct pid_entry tgid_base_stuff[] = {
72092 #ifdef CONFIG_SECURITY
72093 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
72094 #endif
72095 -#ifdef CONFIG_KALLSYMS
72096 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72097 INF("wchan", S_IRUGO, proc_pid_wchan),
72098 #endif
72099 -#ifdef CONFIG_STACKTRACE
72100 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72101 ONE("stack", S_IRUGO, proc_pid_stack),
72102 #endif
72103 #ifdef CONFIG_SCHEDSTATS
72104 @@ -2611,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
72105 #ifdef CONFIG_TASK_IO_ACCOUNTING
72106 INF("io", S_IRUSR, proc_tgid_io_accounting),
72107 #endif
72108 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
72109 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
72110 +#endif
72111 };
72112
72113 static int proc_tgid_base_readdir(struct file * filp,
72114 @@ -2735,7 +2867,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
72115 if (!inode)
72116 goto out;
72117
72118 +#ifdef CONFIG_GRKERNSEC_PROC_USER
72119 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
72120 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72121 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
72122 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
72123 +#else
72124 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
72125 +#endif
72126 inode->i_op = &proc_tgid_base_inode_operations;
72127 inode->i_fop = &proc_tgid_base_operations;
72128 inode->i_flags|=S_IMMUTABLE;
72129 @@ -2777,7 +2916,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
72130 if (!task)
72131 goto out;
72132
72133 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
72134 + goto out_put_task;
72135 +
72136 result = proc_pid_instantiate(dir, dentry, task, NULL);
72137 +out_put_task:
72138 put_task_struct(task);
72139 out:
72140 return result;
72141 @@ -2842,6 +2985,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
72142 {
72143 unsigned int nr;
72144 struct task_struct *reaper;
72145 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72146 + const struct cred *tmpcred = current_cred();
72147 + const struct cred *itercred;
72148 +#endif
72149 + filldir_t __filldir = filldir;
72150 struct tgid_iter iter;
72151 struct pid_namespace *ns;
72152
72153 @@ -2865,8 +3013,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
72154 for (iter = next_tgid(ns, iter);
72155 iter.task;
72156 iter.tgid += 1, iter = next_tgid(ns, iter)) {
72157 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72158 + rcu_read_lock();
72159 + itercred = __task_cred(iter.task);
72160 +#endif
72161 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
72162 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72163 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
72164 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72165 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
72166 +#endif
72167 + )
72168 +#endif
72169 + )
72170 + __filldir = &gr_fake_filldir;
72171 + else
72172 + __filldir = filldir;
72173 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72174 + rcu_read_unlock();
72175 +#endif
72176 filp->f_pos = iter.tgid + TGID_OFFSET;
72177 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
72178 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
72179 put_task_struct(iter.task);
72180 goto out;
72181 }
72182 @@ -2892,7 +3059,7 @@ static const struct pid_entry tid_base_stuff[] = {
72183 #ifdef CONFIG_SCHED_DEBUG
72184 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
72185 #endif
72186 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
72187 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
72188 INF("syscall", S_IRUGO, proc_pid_syscall),
72189 #endif
72190 INF("cmdline", S_IRUGO, proc_pid_cmdline),
72191 @@ -2916,10 +3083,10 @@ static const struct pid_entry tid_base_stuff[] = {
72192 #ifdef CONFIG_SECURITY
72193 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
72194 #endif
72195 -#ifdef CONFIG_KALLSYMS
72196 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72197 INF("wchan", S_IRUGO, proc_pid_wchan),
72198 #endif
72199 -#ifdef CONFIG_STACKTRACE
72200 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72201 ONE("stack", S_IRUGO, proc_pid_stack),
72202 #endif
72203 #ifdef CONFIG_SCHEDSTATS
72204 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
72205 index 82676e3..5f8518a 100644
72206 --- a/fs/proc/cmdline.c
72207 +++ b/fs/proc/cmdline.c
72208 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
72209
72210 static int __init proc_cmdline_init(void)
72211 {
72212 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
72213 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
72214 +#else
72215 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
72216 +#endif
72217 return 0;
72218 }
72219 module_init(proc_cmdline_init);
72220 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
72221 index 59ee7da..469b4b6 100644
72222 --- a/fs/proc/devices.c
72223 +++ b/fs/proc/devices.c
72224 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
72225
72226 static int __init proc_devices_init(void)
72227 {
72228 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
72229 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
72230 +#else
72231 proc_create("devices", 0, NULL, &proc_devinfo_operations);
72232 +#endif
72233 return 0;
72234 }
72235 module_init(proc_devices_init);
72236 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
72237 index d78ade3..81767f9 100644
72238 --- a/fs/proc/inode.c
72239 +++ b/fs/proc/inode.c
72240 @@ -18,12 +18,19 @@
72241 #include <linux/module.h>
72242 #include <linux/smp_lock.h>
72243 #include <linux/sysctl.h>
72244 +#include <linux/grsecurity.h>
72245
72246 #include <asm/system.h>
72247 #include <asm/uaccess.h>
72248
72249 #include "internal.h"
72250
72251 +#ifdef CONFIG_PROC_SYSCTL
72252 +extern const struct inode_operations proc_sys_inode_operations;
72253 +extern const struct inode_operations proc_sys_dir_operations;
72254 +#endif
72255 +
72256 +
72257 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
72258 {
72259 atomic_inc(&de->count);
72260 @@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
72261 de_put(de);
72262 if (PROC_I(inode)->sysctl)
72263 sysctl_head_put(PROC_I(inode)->sysctl);
72264 +
72265 +#ifdef CONFIG_PROC_SYSCTL
72266 + if (inode->i_op == &proc_sys_inode_operations ||
72267 + inode->i_op == &proc_sys_dir_operations)
72268 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
72269 +#endif
72270 +
72271 clear_inode(inode);
72272 }
72273
72274 @@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
72275 if (de->mode) {
72276 inode->i_mode = de->mode;
72277 inode->i_uid = de->uid;
72278 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72279 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
72280 +#else
72281 inode->i_gid = de->gid;
72282 +#endif
72283 }
72284 if (de->size)
72285 inode->i_size = de->size;
72286 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
72287 index 753ca37..26bcf3b 100644
72288 --- a/fs/proc/internal.h
72289 +++ b/fs/proc/internal.h
72290 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
72291 struct pid *pid, struct task_struct *task);
72292 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
72293 struct pid *pid, struct task_struct *task);
72294 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
72295 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
72296 +#endif
72297 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
72298
72299 extern const struct file_operations proc_maps_operations;
72300 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
72301 index b442dac..aab29cb 100644
72302 --- a/fs/proc/kcore.c
72303 +++ b/fs/proc/kcore.c
72304 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
72305 off_t offset = 0;
72306 struct kcore_list *m;
72307
72308 + pax_track_stack();
72309 +
72310 /* setup ELF header */
72311 elf = (struct elfhdr *) bufp;
72312 bufp += sizeof(struct elfhdr);
72313 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
72314 * the addresses in the elf_phdr on our list.
72315 */
72316 start = kc_offset_to_vaddr(*fpos - elf_buflen);
72317 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
72318 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
72319 + if (tsz > buflen)
72320 tsz = buflen;
72321 -
72322 +
72323 while (buflen) {
72324 struct kcore_list *m;
72325
72326 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
72327 kfree(elf_buf);
72328 } else {
72329 if (kern_addr_valid(start)) {
72330 - unsigned long n;
72331 + char *elf_buf;
72332 + mm_segment_t oldfs;
72333
72334 - n = copy_to_user(buffer, (char *)start, tsz);
72335 - /*
72336 - * We cannot distingush between fault on source
72337 - * and fault on destination. When this happens
72338 - * we clear too and hope it will trigger the
72339 - * EFAULT again.
72340 - */
72341 - if (n) {
72342 - if (clear_user(buffer + tsz - n,
72343 - n))
72344 + elf_buf = kmalloc(tsz, GFP_KERNEL);
72345 + if (!elf_buf)
72346 + return -ENOMEM;
72347 + oldfs = get_fs();
72348 + set_fs(KERNEL_DS);
72349 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
72350 + set_fs(oldfs);
72351 + if (copy_to_user(buffer, elf_buf, tsz)) {
72352 + kfree(elf_buf);
72353 return -EFAULT;
72354 + }
72355 }
72356 + set_fs(oldfs);
72357 + kfree(elf_buf);
72358 } else {
72359 if (clear_user(buffer, tsz))
72360 return -EFAULT;
72361 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
72362
72363 static int open_kcore(struct inode *inode, struct file *filp)
72364 {
72365 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
72366 + return -EPERM;
72367 +#endif
72368 if (!capable(CAP_SYS_RAWIO))
72369 return -EPERM;
72370 if (kcore_need_update)
72371 diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
72372 index 7ca7834..cfe90a4 100644
72373 --- a/fs/proc/kmsg.c
72374 +++ b/fs/proc/kmsg.c
72375 @@ -12,37 +12,37 @@
72376 #include <linux/poll.h>
72377 #include <linux/proc_fs.h>
72378 #include <linux/fs.h>
72379 +#include <linux/syslog.h>
72380
72381 #include <asm/uaccess.h>
72382 #include <asm/io.h>
72383
72384 extern wait_queue_head_t log_wait;
72385
72386 -extern int do_syslog(int type, char __user *bug, int count);
72387 -
72388 static int kmsg_open(struct inode * inode, struct file * file)
72389 {
72390 - return do_syslog(1,NULL,0);
72391 + return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
72392 }
72393
72394 static int kmsg_release(struct inode * inode, struct file * file)
72395 {
72396 - (void) do_syslog(0,NULL,0);
72397 + (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
72398 return 0;
72399 }
72400
72401 static ssize_t kmsg_read(struct file *file, char __user *buf,
72402 size_t count, loff_t *ppos)
72403 {
72404 - if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
72405 + if ((file->f_flags & O_NONBLOCK) &&
72406 + !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
72407 return -EAGAIN;
72408 - return do_syslog(2, buf, count);
72409 + return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
72410 }
72411
72412 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
72413 {
72414 poll_wait(file, &log_wait, wait);
72415 - if (do_syslog(9, NULL, 0))
72416 + if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
72417 return POLLIN | POLLRDNORM;
72418 return 0;
72419 }
72420 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
72421 index a65239c..ad1182a 100644
72422 --- a/fs/proc/meminfo.c
72423 +++ b/fs/proc/meminfo.c
72424 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
72425 unsigned long pages[NR_LRU_LISTS];
72426 int lru;
72427
72428 + pax_track_stack();
72429 +
72430 /*
72431 * display in kilobytes.
72432 */
72433 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
72434 vmi.used >> 10,
72435 vmi.largest_chunk >> 10
72436 #ifdef CONFIG_MEMORY_FAILURE
72437 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
72438 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
72439 #endif
72440 );
72441
72442 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
72443 index 9fe7d7e..cdb62c9 100644
72444 --- a/fs/proc/nommu.c
72445 +++ b/fs/proc/nommu.c
72446 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
72447 if (len < 1)
72448 len = 1;
72449 seq_printf(m, "%*c", len, ' ');
72450 - seq_path(m, &file->f_path, "");
72451 + seq_path(m, &file->f_path, "\n\\");
72452 }
72453
72454 seq_putc(m, '\n');
72455 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
72456 index 04d1270..25e1173 100644
72457 --- a/fs/proc/proc_net.c
72458 +++ b/fs/proc/proc_net.c
72459 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
72460 struct task_struct *task;
72461 struct nsproxy *ns;
72462 struct net *net = NULL;
72463 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72464 + const struct cred *cred = current_cred();
72465 +#endif
72466 +
72467 +#ifdef CONFIG_GRKERNSEC_PROC_USER
72468 + if (cred->fsuid)
72469 + return net;
72470 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72471 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
72472 + return net;
72473 +#endif
72474
72475 rcu_read_lock();
72476 task = pid_task(proc_pid(dir), PIDTYPE_PID);
72477 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
72478 index f667e8a..55f4d96 100644
72479 --- a/fs/proc/proc_sysctl.c
72480 +++ b/fs/proc/proc_sysctl.c
72481 @@ -7,11 +7,13 @@
72482 #include <linux/security.h>
72483 #include "internal.h"
72484
72485 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
72486 +
72487 static const struct dentry_operations proc_sys_dentry_operations;
72488 static const struct file_operations proc_sys_file_operations;
72489 -static const struct inode_operations proc_sys_inode_operations;
72490 +const struct inode_operations proc_sys_inode_operations;
72491 static const struct file_operations proc_sys_dir_file_operations;
72492 -static const struct inode_operations proc_sys_dir_operations;
72493 +const struct inode_operations proc_sys_dir_operations;
72494
72495 static struct inode *proc_sys_make_inode(struct super_block *sb,
72496 struct ctl_table_header *head, struct ctl_table *table)
72497 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
72498 if (!p)
72499 goto out;
72500
72501 + if (gr_handle_sysctl(p, MAY_EXEC))
72502 + goto out;
72503 +
72504 err = ERR_PTR(-ENOMEM);
72505 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
72506 if (h)
72507 @@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
72508
72509 err = NULL;
72510 dentry->d_op = &proc_sys_dentry_operations;
72511 +
72512 + gr_handle_proc_create(dentry, inode);
72513 +
72514 d_add(dentry, inode);
72515
72516 out:
72517 @@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
72518 return -ENOMEM;
72519 } else {
72520 child->d_op = &proc_sys_dentry_operations;
72521 +
72522 + gr_handle_proc_create(child, inode);
72523 +
72524 d_add(child, inode);
72525 }
72526 } else {
72527 @@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
72528 if (*pos < file->f_pos)
72529 continue;
72530
72531 + if (gr_handle_sysctl(table, 0))
72532 + continue;
72533 +
72534 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
72535 if (res)
72536 return res;
72537 @@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
72538 if (IS_ERR(head))
72539 return PTR_ERR(head);
72540
72541 + if (table && gr_handle_sysctl(table, MAY_EXEC))
72542 + return -ENOENT;
72543 +
72544 generic_fillattr(inode, stat);
72545 if (table)
72546 stat->mode = (stat->mode & S_IFMT) | table->mode;
72547 @@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
72548 };
72549
72550 static const struct file_operations proc_sys_dir_file_operations = {
72551 + .read = generic_read_dir,
72552 .readdir = proc_sys_readdir,
72553 .llseek = generic_file_llseek,
72554 };
72555
72556 -static const struct inode_operations proc_sys_inode_operations = {
72557 +const struct inode_operations proc_sys_inode_operations = {
72558 .permission = proc_sys_permission,
72559 .setattr = proc_sys_setattr,
72560 .getattr = proc_sys_getattr,
72561 };
72562
72563 -static const struct inode_operations proc_sys_dir_operations = {
72564 +const struct inode_operations proc_sys_dir_operations = {
72565 .lookup = proc_sys_lookup,
72566 .permission = proc_sys_permission,
72567 .setattr = proc_sys_setattr,
72568 diff --git a/fs/proc/root.c b/fs/proc/root.c
72569 index b080b79..d957e63 100644
72570 --- a/fs/proc/root.c
72571 +++ b/fs/proc/root.c
72572 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
72573 #ifdef CONFIG_PROC_DEVICETREE
72574 proc_device_tree_init();
72575 #endif
72576 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
72577 +#ifdef CONFIG_GRKERNSEC_PROC_USER
72578 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
72579 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72580 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
72581 +#endif
72582 +#else
72583 proc_mkdir("bus", NULL);
72584 +#endif
72585 proc_sys_init();
72586 }
72587
72588 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
72589 index 3b7b82a..4b420b0 100644
72590 --- a/fs/proc/task_mmu.c
72591 +++ b/fs/proc/task_mmu.c
72592 @@ -8,6 +8,7 @@
72593 #include <linux/mempolicy.h>
72594 #include <linux/swap.h>
72595 #include <linux/swapops.h>
72596 +#include <linux/grsecurity.h>
72597
72598 #include <asm/elf.h>
72599 #include <asm/uaccess.h>
72600 @@ -46,15 +47,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
72601 "VmStk:\t%8lu kB\n"
72602 "VmExe:\t%8lu kB\n"
72603 "VmLib:\t%8lu kB\n"
72604 - "VmPTE:\t%8lu kB\n",
72605 - hiwater_vm << (PAGE_SHIFT-10),
72606 + "VmPTE:\t%8lu kB\n"
72607 +
72608 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72609 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
72610 +#endif
72611 +
72612 + ,hiwater_vm << (PAGE_SHIFT-10),
72613 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
72614 mm->locked_vm << (PAGE_SHIFT-10),
72615 hiwater_rss << (PAGE_SHIFT-10),
72616 total_rss << (PAGE_SHIFT-10),
72617 data << (PAGE_SHIFT-10),
72618 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
72619 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
72620 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
72621 +
72622 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72623 + , mm->context.user_cs_base, mm->context.user_cs_limit
72624 +#endif
72625 +
72626 + );
72627 }
72628
72629 unsigned long task_vsize(struct mm_struct *mm)
72630 @@ -175,7 +187,8 @@ static void m_stop(struct seq_file *m, void *v)
72631 struct proc_maps_private *priv = m->private;
72632 struct vm_area_struct *vma = v;
72633
72634 - vma_stop(priv, vma);
72635 + if (!IS_ERR(vma))
72636 + vma_stop(priv, vma);
72637 if (priv->task)
72638 put_task_struct(priv->task);
72639 }
72640 @@ -199,6 +212,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
72641 return ret;
72642 }
72643
72644 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72645 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
72646 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
72647 + _mm->pax_flags & MF_PAX_SEGMEXEC))
72648 +#endif
72649 +
72650 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72651 {
72652 struct mm_struct *mm = vma->vm_mm;
72653 @@ -206,7 +225,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72654 int flags = vma->vm_flags;
72655 unsigned long ino = 0;
72656 unsigned long long pgoff = 0;
72657 - unsigned long start;
72658 dev_t dev = 0;
72659 int len;
72660
72661 @@ -217,20 +235,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72662 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
72663 }
72664
72665 - /* We don't show the stack guard page in /proc/maps */
72666 - start = vma->vm_start;
72667 - if (vma->vm_flags & VM_GROWSDOWN)
72668 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
72669 - start += PAGE_SIZE;
72670 -
72671 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
72672 - start,
72673 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72674 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
72675 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
72676 +#else
72677 + vma->vm_start,
72678 vma->vm_end,
72679 +#endif
72680 flags & VM_READ ? 'r' : '-',
72681 flags & VM_WRITE ? 'w' : '-',
72682 flags & VM_EXEC ? 'x' : '-',
72683 flags & VM_MAYSHARE ? 's' : 'p',
72684 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72685 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
72686 +#else
72687 pgoff,
72688 +#endif
72689 MAJOR(dev), MINOR(dev), ino, &len);
72690
72691 /*
72692 @@ -239,7 +260,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72693 */
72694 if (file) {
72695 pad_len_spaces(m, len);
72696 - seq_path(m, &file->f_path, "\n");
72697 + seq_path(m, &file->f_path, "\n\\");
72698 } else {
72699 const char *name = arch_vma_name(vma);
72700 if (!name) {
72701 @@ -247,8 +268,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72702 if (vma->vm_start <= mm->brk &&
72703 vma->vm_end >= mm->start_brk) {
72704 name = "[heap]";
72705 - } else if (vma->vm_start <= mm->start_stack &&
72706 - vma->vm_end >= mm->start_stack) {
72707 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
72708 + (vma->vm_start <= mm->start_stack &&
72709 + vma->vm_end >= mm->start_stack)) {
72710 name = "[stack]";
72711 }
72712 } else {
72713 @@ -269,6 +291,13 @@ static int show_map(struct seq_file *m, void *v)
72714 struct proc_maps_private *priv = m->private;
72715 struct task_struct *task = priv->task;
72716
72717 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72718 + if (current->exec_id != m->exec_id) {
72719 + gr_log_badprocpid("maps");
72720 + return 0;
72721 + }
72722 +#endif
72723 +
72724 show_map_vma(m, vma);
72725
72726 if (m->count < m->size) /* vma is copied successfully */
72727 @@ -390,10 +419,23 @@ static int show_smap(struct seq_file *m, void *v)
72728 .private = &mss,
72729 };
72730
72731 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72732 + if (current->exec_id != m->exec_id) {
72733 + gr_log_badprocpid("smaps");
72734 + return 0;
72735 + }
72736 +#endif
72737 memset(&mss, 0, sizeof mss);
72738 - mss.vma = vma;
72739 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
72740 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
72741 +
72742 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72743 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
72744 +#endif
72745 + mss.vma = vma;
72746 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
72747 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
72748 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72749 + }
72750 +#endif
72751
72752 show_map_vma(m, vma);
72753
72754 @@ -409,7 +451,11 @@ static int show_smap(struct seq_file *m, void *v)
72755 "Swap: %8lu kB\n"
72756 "KernelPageSize: %8lu kB\n"
72757 "MMUPageSize: %8lu kB\n",
72758 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72759 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
72760 +#else
72761 (vma->vm_end - vma->vm_start) >> 10,
72762 +#endif
72763 mss.resident >> 10,
72764 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
72765 mss.shared_clean >> 10,
72766 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
72767 index 8f5c05d..c99c76d 100644
72768 --- a/fs/proc/task_nommu.c
72769 +++ b/fs/proc/task_nommu.c
72770 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
72771 else
72772 bytes += kobjsize(mm);
72773
72774 - if (current->fs && current->fs->users > 1)
72775 + if (current->fs && atomic_read(&current->fs->users) > 1)
72776 sbytes += kobjsize(current->fs);
72777 else
72778 bytes += kobjsize(current->fs);
72779 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
72780 if (len < 1)
72781 len = 1;
72782 seq_printf(m, "%*c", len, ' ');
72783 - seq_path(m, &file->f_path, "");
72784 + seq_path(m, &file->f_path, "\n\\");
72785 }
72786
72787 seq_putc(m, '\n');
72788 diff --git a/fs/readdir.c b/fs/readdir.c
72789 index 7723401..30059a6 100644
72790 --- a/fs/readdir.c
72791 +++ b/fs/readdir.c
72792 @@ -16,6 +16,7 @@
72793 #include <linux/security.h>
72794 #include <linux/syscalls.h>
72795 #include <linux/unistd.h>
72796 +#include <linux/namei.h>
72797
72798 #include <asm/uaccess.h>
72799
72800 @@ -67,6 +68,7 @@ struct old_linux_dirent {
72801
72802 struct readdir_callback {
72803 struct old_linux_dirent __user * dirent;
72804 + struct file * file;
72805 int result;
72806 };
72807
72808 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
72809 buf->result = -EOVERFLOW;
72810 return -EOVERFLOW;
72811 }
72812 +
72813 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
72814 + return 0;
72815 +
72816 buf->result++;
72817 dirent = buf->dirent;
72818 if (!access_ok(VERIFY_WRITE, dirent,
72819 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
72820
72821 buf.result = 0;
72822 buf.dirent = dirent;
72823 + buf.file = file;
72824
72825 error = vfs_readdir(file, fillonedir, &buf);
72826 if (buf.result)
72827 @@ -142,6 +149,7 @@ struct linux_dirent {
72828 struct getdents_callback {
72829 struct linux_dirent __user * current_dir;
72830 struct linux_dirent __user * previous;
72831 + struct file * file;
72832 int count;
72833 int error;
72834 };
72835 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
72836 buf->error = -EOVERFLOW;
72837 return -EOVERFLOW;
72838 }
72839 +
72840 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
72841 + return 0;
72842 +
72843 dirent = buf->previous;
72844 if (dirent) {
72845 if (__put_user(offset, &dirent->d_off))
72846 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
72847 buf.previous = NULL;
72848 buf.count = count;
72849 buf.error = 0;
72850 + buf.file = file;
72851
72852 error = vfs_readdir(file, filldir, &buf);
72853 if (error >= 0)
72854 @@ -228,6 +241,7 @@ out:
72855 struct getdents_callback64 {
72856 struct linux_dirent64 __user * current_dir;
72857 struct linux_dirent64 __user * previous;
72858 + struct file *file;
72859 int count;
72860 int error;
72861 };
72862 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
72863 buf->error = -EINVAL; /* only used if we fail.. */
72864 if (reclen > buf->count)
72865 return -EINVAL;
72866 +
72867 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
72868 + return 0;
72869 +
72870 dirent = buf->previous;
72871 if (dirent) {
72872 if (__put_user(offset, &dirent->d_off))
72873 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
72874
72875 buf.current_dir = dirent;
72876 buf.previous = NULL;
72877 + buf.file = file;
72878 buf.count = count;
72879 buf.error = 0;
72880
72881 @@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
72882 error = buf.error;
72883 lastdirent = buf.previous;
72884 if (lastdirent) {
72885 - typeof(lastdirent->d_off) d_off = file->f_pos;
72886 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
72887 if (__put_user(d_off, &lastdirent->d_off))
72888 error = -EFAULT;
72889 else
72890 diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
72891 index d42c30c..4fd8718 100644
72892 --- a/fs/reiserfs/dir.c
72893 +++ b/fs/reiserfs/dir.c
72894 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
72895 struct reiserfs_dir_entry de;
72896 int ret = 0;
72897
72898 + pax_track_stack();
72899 +
72900 reiserfs_write_lock(inode->i_sb);
72901
72902 reiserfs_check_lock_depth(inode->i_sb, "readdir");
72903 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
72904 index 128d3f7..8840d44 100644
72905 --- a/fs/reiserfs/do_balan.c
72906 +++ b/fs/reiserfs/do_balan.c
72907 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
72908 return;
72909 }
72910
72911 - atomic_inc(&(fs_generation(tb->tb_sb)));
72912 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
72913 do_balance_starts(tb);
72914
72915 /* balance leaf returns 0 except if combining L R and S into
72916 diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
72917 index 72cb1cc..d0e3181 100644
72918 --- a/fs/reiserfs/item_ops.c
72919 +++ b/fs/reiserfs/item_ops.c
72920 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
72921 vi->vi_index, vi->vi_type, vi->vi_ih);
72922 }
72923
72924 -static struct item_operations stat_data_ops = {
72925 +static const struct item_operations stat_data_ops = {
72926 .bytes_number = sd_bytes_number,
72927 .decrement_key = sd_decrement_key,
72928 .is_left_mergeable = sd_is_left_mergeable,
72929 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
72930 vi->vi_index, vi->vi_type, vi->vi_ih);
72931 }
72932
72933 -static struct item_operations direct_ops = {
72934 +static const struct item_operations direct_ops = {
72935 .bytes_number = direct_bytes_number,
72936 .decrement_key = direct_decrement_key,
72937 .is_left_mergeable = direct_is_left_mergeable,
72938 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
72939 vi->vi_index, vi->vi_type, vi->vi_ih);
72940 }
72941
72942 -static struct item_operations indirect_ops = {
72943 +static const struct item_operations indirect_ops = {
72944 .bytes_number = indirect_bytes_number,
72945 .decrement_key = indirect_decrement_key,
72946 .is_left_mergeable = indirect_is_left_mergeable,
72947 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
72948 printk("\n");
72949 }
72950
72951 -static struct item_operations direntry_ops = {
72952 +static const struct item_operations direntry_ops = {
72953 .bytes_number = direntry_bytes_number,
72954 .decrement_key = direntry_decrement_key,
72955 .is_left_mergeable = direntry_is_left_mergeable,
72956 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
72957 "Invalid item type observed, run fsck ASAP");
72958 }
72959
72960 -static struct item_operations errcatch_ops = {
72961 +static const struct item_operations errcatch_ops = {
72962 errcatch_bytes_number,
72963 errcatch_decrement_key,
72964 errcatch_is_left_mergeable,
72965 @@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
72966 #error Item types must use disk-format assigned values.
72967 #endif
72968
72969 -struct item_operations *item_ops[TYPE_ANY + 1] = {
72970 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
72971 &stat_data_ops,
72972 &indirect_ops,
72973 &direct_ops,
72974 diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
72975 index b5fe0aa..e0e25c4 100644
72976 --- a/fs/reiserfs/journal.c
72977 +++ b/fs/reiserfs/journal.c
72978 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
72979 struct buffer_head *bh;
72980 int i, j;
72981
72982 + pax_track_stack();
72983 +
72984 bh = __getblk(dev, block, bufsize);
72985 if (buffer_uptodate(bh))
72986 return (bh);
72987 diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
72988 index 2715791..b8996db 100644
72989 --- a/fs/reiserfs/namei.c
72990 +++ b/fs/reiserfs/namei.c
72991 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
72992 unsigned long savelink = 1;
72993 struct timespec ctime;
72994
72995 + pax_track_stack();
72996 +
72997 /* three balancings: (1) old name removal, (2) new name insertion
72998 and (3) maybe "save" link insertion
72999 stat data updates: (1) old directory,
73000 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
73001 index 9229e55..3d2e3b7 100644
73002 --- a/fs/reiserfs/procfs.c
73003 +++ b/fs/reiserfs/procfs.c
73004 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
73005 "SMALL_TAILS " : "NO_TAILS ",
73006 replay_only(sb) ? "REPLAY_ONLY " : "",
73007 convert_reiserfs(sb) ? "CONV " : "",
73008 - atomic_read(&r->s_generation_counter),
73009 + atomic_read_unchecked(&r->s_generation_counter),
73010 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
73011 SF(s_do_balance), SF(s_unneeded_left_neighbor),
73012 SF(s_good_search_by_key_reada), SF(s_bmaps),
73013 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
73014 struct journal_params *jp = &rs->s_v1.s_journal;
73015 char b[BDEVNAME_SIZE];
73016
73017 + pax_track_stack();
73018 +
73019 seq_printf(m, /* on-disk fields */
73020 "jp_journal_1st_block: \t%i\n"
73021 "jp_journal_dev: \t%s[%x]\n"
73022 diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
73023 index d036ee5..4c7dca1 100644
73024 --- a/fs/reiserfs/stree.c
73025 +++ b/fs/reiserfs/stree.c
73026 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
73027 int iter = 0;
73028 #endif
73029
73030 + pax_track_stack();
73031 +
73032 BUG_ON(!th->t_trans_id);
73033
73034 init_tb_struct(th, &s_del_balance, sb, path,
73035 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
73036 int retval;
73037 int quota_cut_bytes = 0;
73038
73039 + pax_track_stack();
73040 +
73041 BUG_ON(!th->t_trans_id);
73042
73043 le_key2cpu_key(&cpu_key, key);
73044 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
73045 int quota_cut_bytes;
73046 loff_t tail_pos = 0;
73047
73048 + pax_track_stack();
73049 +
73050 BUG_ON(!th->t_trans_id);
73051
73052 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
73053 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
73054 int retval;
73055 int fs_gen;
73056
73057 + pax_track_stack();
73058 +
73059 BUG_ON(!th->t_trans_id);
73060
73061 fs_gen = get_generation(inode->i_sb);
73062 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
73063 int fs_gen = 0;
73064 int quota_bytes = 0;
73065
73066 + pax_track_stack();
73067 +
73068 BUG_ON(!th->t_trans_id);
73069
73070 if (inode) { /* Do we count quotas for item? */
73071 diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
73072 index 7cb1285..c726cd0 100644
73073 --- a/fs/reiserfs/super.c
73074 +++ b/fs/reiserfs/super.c
73075 @@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
73076 {.option_name = NULL}
73077 };
73078
73079 + pax_track_stack();
73080 +
73081 *blocks = 0;
73082 if (!options || !*options)
73083 /* use default configuration: create tails, journaling on, no
73084 diff --git a/fs/select.c b/fs/select.c
73085 index fd38ce2..f5381b8 100644
73086 --- a/fs/select.c
73087 +++ b/fs/select.c
73088 @@ -20,6 +20,7 @@
73089 #include <linux/module.h>
73090 #include <linux/slab.h>
73091 #include <linux/poll.h>
73092 +#include <linux/security.h>
73093 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
73094 #include <linux/file.h>
73095 #include <linux/fdtable.h>
73096 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
73097 int retval, i, timed_out = 0;
73098 unsigned long slack = 0;
73099
73100 + pax_track_stack();
73101 +
73102 rcu_read_lock();
73103 retval = max_select_fd(n, fds);
73104 rcu_read_unlock();
73105 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
73106 /* Allocate small arguments on the stack to save memory and be faster */
73107 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
73108
73109 + pax_track_stack();
73110 +
73111 ret = -EINVAL;
73112 if (n < 0)
73113 goto out_nofds;
73114 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
73115 struct poll_list *walk = head;
73116 unsigned long todo = nfds;
73117
73118 + pax_track_stack();
73119 +
73120 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
73121 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
73122 return -EINVAL;
73123
73124 diff --git a/fs/seq_file.c b/fs/seq_file.c
73125 index eae7d9d..b7613c6 100644
73126 --- a/fs/seq_file.c
73127 +++ b/fs/seq_file.c
73128 @@ -9,6 +9,7 @@
73129 #include <linux/module.h>
73130 #include <linux/seq_file.h>
73131 #include <linux/slab.h>
73132 +#include <linux/sched.h>
73133
73134 #include <asm/uaccess.h>
73135 #include <asm/page.h>
73136 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
73137 memset(p, 0, sizeof(*p));
73138 mutex_init(&p->lock);
73139 p->op = op;
73140 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73141 + p->exec_id = current->exec_id;
73142 +#endif
73143
73144 /*
73145 * Wrappers around seq_open(e.g. swaps_open) need to be
73146 @@ -551,7 +555,7 @@ static void single_stop(struct seq_file *p, void *v)
73147 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
73148 void *data)
73149 {
73150 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
73151 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
73152 int res = -ENOMEM;
73153
73154 if (op) {
73155 diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
73156 index 71c29b6..54694dd 100644
73157 --- a/fs/smbfs/proc.c
73158 +++ b/fs/smbfs/proc.c
73159 @@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
73160
73161 out:
73162 if (server->local_nls != NULL && server->remote_nls != NULL)
73163 - server->ops->convert = convert_cp;
73164 + *(void **)&server->ops->convert = convert_cp;
73165 else
73166 - server->ops->convert = convert_memcpy;
73167 + *(void **)&server->ops->convert = convert_memcpy;
73168
73169 smb_unlock_server(server);
73170 return n;
73171 @@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
73172
73173 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
73174 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
73175 - server->ops->getattr = smb_proc_getattr_core;
73176 + *(void **)&server->ops->getattr = smb_proc_getattr_core;
73177 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
73178 - server->ops->getattr = smb_proc_getattr_ff;
73179 + *(void **)&server->ops->getattr = smb_proc_getattr_ff;
73180 }
73181
73182 /* Decode server capabilities */
73183 @@ -3439,7 +3439,7 @@ out:
73184 static void
73185 install_ops(struct smb_ops *dst, struct smb_ops *src)
73186 {
73187 - memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
73188 + memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
73189 }
73190
73191 /* < LANMAN2 */
73192 diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
73193 index 00b2909..2ace383 100644
73194 --- a/fs/smbfs/symlink.c
73195 +++ b/fs/smbfs/symlink.c
73196 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
73197
73198 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
73199 {
73200 - char *s = nd_get_link(nd);
73201 + const char *s = nd_get_link(nd);
73202 if (!IS_ERR(s))
73203 __putname(s);
73204 }
73205 diff --git a/fs/splice.c b/fs/splice.c
73206 index bb92b7c5..5aa72b0 100644
73207 --- a/fs/splice.c
73208 +++ b/fs/splice.c
73209 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
73210 pipe_lock(pipe);
73211
73212 for (;;) {
73213 - if (!pipe->readers) {
73214 + if (!atomic_read(&pipe->readers)) {
73215 send_sig(SIGPIPE, current, 0);
73216 if (!ret)
73217 ret = -EPIPE;
73218 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
73219 do_wakeup = 0;
73220 }
73221
73222 - pipe->waiting_writers++;
73223 + atomic_inc(&pipe->waiting_writers);
73224 pipe_wait(pipe);
73225 - pipe->waiting_writers--;
73226 + atomic_dec(&pipe->waiting_writers);
73227 }
73228
73229 pipe_unlock(pipe);
73230 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
73231 .spd_release = spd_release_page,
73232 };
73233
73234 + pax_track_stack();
73235 +
73236 index = *ppos >> PAGE_CACHE_SHIFT;
73237 loff = *ppos & ~PAGE_CACHE_MASK;
73238 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
73239 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
73240 old_fs = get_fs();
73241 set_fs(get_ds());
73242 /* The cast to a user pointer is valid due to the set_fs() */
73243 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
73244 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
73245 set_fs(old_fs);
73246
73247 return res;
73248 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
73249 old_fs = get_fs();
73250 set_fs(get_ds());
73251 /* The cast to a user pointer is valid due to the set_fs() */
73252 - res = vfs_write(file, (const char __user *)buf, count, &pos);
73253 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
73254 set_fs(old_fs);
73255
73256 return res;
73257 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
73258 .spd_release = spd_release_page,
73259 };
73260
73261 + pax_track_stack();
73262 +
73263 index = *ppos >> PAGE_CACHE_SHIFT;
73264 offset = *ppos & ~PAGE_CACHE_MASK;
73265 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
73266 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
73267 goto err;
73268
73269 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
73270 - vec[i].iov_base = (void __user *) page_address(page);
73271 + vec[i].iov_base = (__force void __user *) page_address(page);
73272 vec[i].iov_len = this_len;
73273 pages[i] = page;
73274 spd.nr_pages++;
73275 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
73276 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
73277 {
73278 while (!pipe->nrbufs) {
73279 - if (!pipe->writers)
73280 + if (!atomic_read(&pipe->writers))
73281 return 0;
73282
73283 - if (!pipe->waiting_writers && sd->num_spliced)
73284 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
73285 return 0;
73286
73287 if (sd->flags & SPLICE_F_NONBLOCK)
73288 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
73289 * out of the pipe right after the splice_to_pipe(). So set
73290 * PIPE_READERS appropriately.
73291 */
73292 - pipe->readers = 1;
73293 + atomic_set(&pipe->readers, 1);
73294
73295 current->splice_pipe = pipe;
73296 }
73297 @@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
73298 .spd_release = spd_release_page,
73299 };
73300
73301 + pax_track_stack();
73302 +
73303 pipe = pipe_info(file->f_path.dentry->d_inode);
73304 if (!pipe)
73305 return -EBADF;
73306 @@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
73307 ret = -ERESTARTSYS;
73308 break;
73309 }
73310 - if (!pipe->writers)
73311 + if (!atomic_read(&pipe->writers))
73312 break;
73313 - if (!pipe->waiting_writers) {
73314 + if (!atomic_read(&pipe->waiting_writers)) {
73315 if (flags & SPLICE_F_NONBLOCK) {
73316 ret = -EAGAIN;
73317 break;
73318 @@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
73319 pipe_lock(pipe);
73320
73321 while (pipe->nrbufs >= PIPE_BUFFERS) {
73322 - if (!pipe->readers) {
73323 + if (!atomic_read(&pipe->readers)) {
73324 send_sig(SIGPIPE, current, 0);
73325 ret = -EPIPE;
73326 break;
73327 @@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
73328 ret = -ERESTARTSYS;
73329 break;
73330 }
73331 - pipe->waiting_writers++;
73332 + atomic_inc(&pipe->waiting_writers);
73333 pipe_wait(pipe);
73334 - pipe->waiting_writers--;
73335 + atomic_dec(&pipe->waiting_writers);
73336 }
73337
73338 pipe_unlock(pipe);
73339 @@ -1786,14 +1792,14 @@ retry:
73340 pipe_double_lock(ipipe, opipe);
73341
73342 do {
73343 - if (!opipe->readers) {
73344 + if (!atomic_read(&opipe->readers)) {
73345 send_sig(SIGPIPE, current, 0);
73346 if (!ret)
73347 ret = -EPIPE;
73348 break;
73349 }
73350
73351 - if (!ipipe->nrbufs && !ipipe->writers)
73352 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
73353 break;
73354
73355 /*
73356 @@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
73357 pipe_double_lock(ipipe, opipe);
73358
73359 do {
73360 - if (!opipe->readers) {
73361 + if (!atomic_read(&opipe->readers)) {
73362 send_sig(SIGPIPE, current, 0);
73363 if (!ret)
73364 ret = -EPIPE;
73365 @@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
73366 * return EAGAIN if we have the potential of some data in the
73367 * future, otherwise just return 0
73368 */
73369 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
73370 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
73371 ret = -EAGAIN;
73372
73373 pipe_unlock(ipipe);
73374 diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
73375 index 60c702b..dddc2b5 100644
73376 --- a/fs/sysfs/bin.c
73377 +++ b/fs/sysfs/bin.c
73378 @@ -67,6 +67,8 @@ fill_read(struct dentry *dentry, char *buffer, loff_t off, size_t count)
73379 }
73380
73381 static ssize_t
73382 +read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off) __size_overflow(3);
73383 +static ssize_t
73384 read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
73385 {
73386 struct bin_buffer *bb = file->private_data;
73387 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
73388 index e020183..18d64b4 100644
73389 --- a/fs/sysfs/dir.c
73390 +++ b/fs/sysfs/dir.c
73391 @@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
73392 struct sysfs_dirent *sd;
73393 int rc;
73394
73395 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
73396 + const char *parent_name = parent_sd->s_name;
73397 +
73398 + mode = S_IFDIR | S_IRWXU;
73399 +
73400 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
73401 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
73402 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
73403 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
73404 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
73405 +#endif
73406 +
73407 /* allocate */
73408 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
73409 if (!sd)
73410 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
73411 index 7118a38..70af853 100644
73412 --- a/fs/sysfs/file.c
73413 +++ b/fs/sysfs/file.c
73414 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
73415
73416 struct sysfs_open_dirent {
73417 atomic_t refcnt;
73418 - atomic_t event;
73419 + atomic_unchecked_t event;
73420 wait_queue_head_t poll;
73421 struct list_head buffers; /* goes through sysfs_buffer.list */
73422 };
73423 @@ -53,7 +53,7 @@ struct sysfs_buffer {
73424 size_t count;
73425 loff_t pos;
73426 char * page;
73427 - struct sysfs_ops * ops;
73428 + const struct sysfs_ops * ops;
73429 struct mutex mutex;
73430 int needs_read_fill;
73431 int event;
73432 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
73433 {
73434 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
73435 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
73436 - struct sysfs_ops * ops = buffer->ops;
73437 + const struct sysfs_ops * ops = buffer->ops;
73438 int ret = 0;
73439 ssize_t count;
73440
73441 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
73442 if (!sysfs_get_active_two(attr_sd))
73443 return -ENODEV;
73444
73445 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
73446 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
73447 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
73448
73449 sysfs_put_active_two(attr_sd);
73450 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
73451 {
73452 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
73453 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
73454 - struct sysfs_ops * ops = buffer->ops;
73455 + const struct sysfs_ops * ops = buffer->ops;
73456 int rc;
73457
73458 /* need attr_sd for attr and ops, its parent for kobj */
73459 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
73460 return -ENOMEM;
73461
73462 atomic_set(&new_od->refcnt, 0);
73463 - atomic_set(&new_od->event, 1);
73464 + atomic_set_unchecked(&new_od->event, 1);
73465 init_waitqueue_head(&new_od->poll);
73466 INIT_LIST_HEAD(&new_od->buffers);
73467 goto retry;
73468 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
73469 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
73470 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
73471 struct sysfs_buffer *buffer;
73472 - struct sysfs_ops *ops;
73473 + const struct sysfs_ops *ops;
73474 int error = -EACCES;
73475 char *p;
73476
73477 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
73478
73479 sysfs_put_active_two(attr_sd);
73480
73481 - if (buffer->event != atomic_read(&od->event))
73482 + if (buffer->event != atomic_read_unchecked(&od->event))
73483 goto trigger;
73484
73485 return DEFAULT_POLLMASK;
73486 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
73487
73488 od = sd->s_attr.open;
73489 if (od) {
73490 - atomic_inc(&od->event);
73491 + atomic_inc_unchecked(&od->event);
73492 wake_up_interruptible(&od->poll);
73493 }
73494
73495 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
73496 index c5081ad..342ea86 100644
73497 --- a/fs/sysfs/symlink.c
73498 +++ b/fs/sysfs/symlink.c
73499 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
73500
73501 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
73502 {
73503 - char *page = nd_get_link(nd);
73504 + const char *page = nd_get_link(nd);
73505 if (!IS_ERR(page))
73506 free_page((unsigned long)page);
73507 }
73508 diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
73509 index 1e06853..b06d325 100644
73510 --- a/fs/udf/balloc.c
73511 +++ b/fs/udf/balloc.c
73512 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
73513
73514 mutex_lock(&sbi->s_alloc_mutex);
73515 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
73516 - if (bloc->logicalBlockNum < 0 ||
73517 - (bloc->logicalBlockNum + count) >
73518 - partmap->s_partition_len) {
73519 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
73520 udf_debug("%d < %d || %d + %d > %d\n",
73521 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
73522 count, partmap->s_partition_len);
73523 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
73524
73525 mutex_lock(&sbi->s_alloc_mutex);
73526 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
73527 - if (bloc->logicalBlockNum < 0 ||
73528 - (bloc->logicalBlockNum + count) >
73529 - partmap->s_partition_len) {
73530 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
73531 udf_debug("%d < %d || %d + %d > %d\n",
73532 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
73533 partmap->s_partition_len);
73534 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
73535 index 6d24c2c..fff470f 100644
73536 --- a/fs/udf/inode.c
73537 +++ b/fs/udf/inode.c
73538 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
73539 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
73540 int lastblock = 0;
73541
73542 + pax_track_stack();
73543 +
73544 prev_epos.offset = udf_file_entry_alloc_offset(inode);
73545 prev_epos.block = iinfo->i_location;
73546 prev_epos.bh = NULL;
73547 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
73548 index 9215700..bf1f68e 100644
73549 --- a/fs/udf/misc.c
73550 +++ b/fs/udf/misc.c
73551 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
73552
73553 u8 udf_tag_checksum(const struct tag *t)
73554 {
73555 - u8 *data = (u8 *)t;
73556 + const u8 *data = (const u8 *)t;
73557 u8 checksum = 0;
73558 int i;
73559 for (i = 0; i < sizeof(struct tag); ++i)
73560 diff --git a/fs/utimes.c b/fs/utimes.c
73561 index e4c75db..b4df0e0 100644
73562 --- a/fs/utimes.c
73563 +++ b/fs/utimes.c
73564 @@ -1,6 +1,7 @@
73565 #include <linux/compiler.h>
73566 #include <linux/file.h>
73567 #include <linux/fs.h>
73568 +#include <linux/security.h>
73569 #include <linux/linkage.h>
73570 #include <linux/mount.h>
73571 #include <linux/namei.h>
73572 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
73573 goto mnt_drop_write_and_out;
73574 }
73575 }
73576 +
73577 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
73578 + error = -EACCES;
73579 + goto mnt_drop_write_and_out;
73580 + }
73581 +
73582 mutex_lock(&inode->i_mutex);
73583 error = notify_change(path->dentry, &newattrs);
73584 mutex_unlock(&inode->i_mutex);
73585 diff --git a/fs/xattr.c b/fs/xattr.c
73586 index 6d4f6d3..cda3958 100644
73587 --- a/fs/xattr.c
73588 +++ b/fs/xattr.c
73589 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
73590 * Extended attribute SET operations
73591 */
73592 static long
73593 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
73594 +setxattr(struct path *path, const char __user *name, const void __user *value,
73595 size_t size, int flags)
73596 {
73597 int error;
73598 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
73599 return PTR_ERR(kvalue);
73600 }
73601
73602 - error = vfs_setxattr(d, kname, kvalue, size, flags);
73603 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
73604 + error = -EACCES;
73605 + goto out;
73606 + }
73607 +
73608 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
73609 +out:
73610 kfree(kvalue);
73611 return error;
73612 }
73613 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
73614 return error;
73615 error = mnt_want_write(path.mnt);
73616 if (!error) {
73617 - error = setxattr(path.dentry, name, value, size, flags);
73618 + error = setxattr(&path, name, value, size, flags);
73619 mnt_drop_write(path.mnt);
73620 }
73621 path_put(&path);
73622 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
73623 return error;
73624 error = mnt_want_write(path.mnt);
73625 if (!error) {
73626 - error = setxattr(path.dentry, name, value, size, flags);
73627 + error = setxattr(&path, name, value, size, flags);
73628 mnt_drop_write(path.mnt);
73629 }
73630 path_put(&path);
73631 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
73632 const void __user *,value, size_t, size, int, flags)
73633 {
73634 struct file *f;
73635 - struct dentry *dentry;
73636 int error = -EBADF;
73637
73638 f = fget(fd);
73639 if (!f)
73640 return error;
73641 - dentry = f->f_path.dentry;
73642 - audit_inode(NULL, dentry);
73643 + audit_inode(NULL, f->f_path.dentry);
73644 error = mnt_want_write_file(f);
73645 if (!error) {
73646 - error = setxattr(dentry, name, value, size, flags);
73647 + error = setxattr(&f->f_path, name, value, size, flags);
73648 mnt_drop_write(f->f_path.mnt);
73649 }
73650 fput(f);
73651 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
73652 index c6ad7c7..f2847a7 100644
73653 --- a/fs/xattr_acl.c
73654 +++ b/fs/xattr_acl.c
73655 @@ -17,8 +17,8 @@
73656 struct posix_acl *
73657 posix_acl_from_xattr(const void *value, size_t size)
73658 {
73659 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
73660 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
73661 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
73662 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
73663 int count;
73664 struct posix_acl *acl;
73665 struct posix_acl_entry *acl_e;
73666 diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
73667 index 942362f..88f96f5 100644
73668 --- a/fs/xfs/linux-2.6/xfs_ioctl.c
73669 +++ b/fs/xfs/linux-2.6/xfs_ioctl.c
73670 @@ -134,7 +134,7 @@ xfs_find_handle(
73671 }
73672
73673 error = -EFAULT;
73674 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
73675 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
73676 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
73677 goto out_put;
73678
73679 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
73680 if (IS_ERR(dentry))
73681 return PTR_ERR(dentry);
73682
73683 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
73684 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
73685 if (!kbuf)
73686 goto out_dput;
73687
73688 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
73689 xfs_mount_t *mp,
73690 void __user *arg)
73691 {
73692 - xfs_fsop_geom_t fsgeo;
73693 + xfs_fsop_geom_t fsgeo;
73694 int error;
73695
73696 error = xfs_fs_geometry(mp, &fsgeo, 3);
73697 diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
73698 index bad485a..479bd32 100644
73699 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c
73700 +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
73701 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
73702 xfs_fsop_geom_t fsgeo;
73703 int error;
73704
73705 + memset(&fsgeo, 0, sizeof(fsgeo));
73706 error = xfs_fs_geometry(mp, &fsgeo, 3);
73707 if (error)
73708 return -error;
73709 diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
73710 index 1f3b4b8..6102f6d 100644
73711 --- a/fs/xfs/linux-2.6/xfs_iops.c
73712 +++ b/fs/xfs/linux-2.6/xfs_iops.c
73713 @@ -468,7 +468,7 @@ xfs_vn_put_link(
73714 struct nameidata *nd,
73715 void *p)
73716 {
73717 - char *s = nd_get_link(nd);
73718 + const char *s = nd_get_link(nd);
73719
73720 if (!IS_ERR(s))
73721 kfree(s);
73722 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
73723 index 8971fb0..5fc1eb2 100644
73724 --- a/fs/xfs/xfs_bmap.c
73725 +++ b/fs/xfs/xfs_bmap.c
73726 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
73727 int nmap,
73728 int ret_nmap);
73729 #else
73730 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
73731 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
73732 #endif /* DEBUG */
73733
73734 #if defined(XFS_RW_TRACE)
73735 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
73736 index e89734e..5e84d8d 100644
73737 --- a/fs/xfs/xfs_dir2_sf.c
73738 +++ b/fs/xfs/xfs_dir2_sf.c
73739 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
73740 }
73741
73742 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
73743 - if (filldir(dirent, sfep->name, sfep->namelen,
73744 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
73745 + char name[sfep->namelen];
73746 + memcpy(name, sfep->name, sfep->namelen);
73747 + if (filldir(dirent, name, sfep->namelen,
73748 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
73749 + *offset = off & 0x7fffffff;
73750 + return 0;
73751 + }
73752 + } else if (filldir(dirent, sfep->name, sfep->namelen,
73753 off & 0x7fffffff, ino, DT_UNKNOWN)) {
73754 *offset = off & 0x7fffffff;
73755 return 0;
73756 diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
73757 index 8f32f50..b6a41e8 100644
73758 --- a/fs/xfs/xfs_vnodeops.c
73759 +++ b/fs/xfs/xfs_vnodeops.c
73760 @@ -564,13 +564,18 @@ xfs_readlink(
73761
73762 xfs_ilock(ip, XFS_ILOCK_SHARED);
73763
73764 - ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
73765 - ASSERT(ip->i_d.di_size <= MAXPATHLEN);
73766 -
73767 pathlen = ip->i_d.di_size;
73768 if (!pathlen)
73769 goto out;
73770
73771 + if (pathlen > MAXPATHLEN) {
73772 + xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
73773 + __func__, (unsigned long long)ip->i_ino, pathlen);
73774 + ASSERT(0);
73775 + error = XFS_ERROR(EFSCORRUPTED);
73776 + goto out;
73777 + }
73778 +
73779 if (ip->i_df.if_flags & XFS_IFINLINE) {
73780 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
73781 link[pathlen] = '\0';
73782 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
73783 new file mode 100644
73784 index 0000000..50819f8
73785 --- /dev/null
73786 +++ b/grsecurity/Kconfig
73787 @@ -0,0 +1,1077 @@
73788 +#
73789 +# grecurity configuration
73790 +#
73791 +
73792 +menu "Grsecurity"
73793 +
73794 +config GRKERNSEC
73795 + bool "Grsecurity"
73796 + select CRYPTO
73797 + select CRYPTO_SHA256
73798 + help
73799 + If you say Y here, you will be able to configure many features
73800 + that will enhance the security of your system. It is highly
73801 + recommended that you say Y here and read through the help
73802 + for each option so that you fully understand the features and
73803 + can evaluate their usefulness for your machine.
73804 +
73805 +choice
73806 + prompt "Security Level"
73807 + depends on GRKERNSEC
73808 + default GRKERNSEC_CUSTOM
73809 +
73810 +config GRKERNSEC_LOW
73811 + bool "Low"
73812 + select GRKERNSEC_LINK
73813 + select GRKERNSEC_FIFO
73814 + select GRKERNSEC_RANDNET
73815 + select GRKERNSEC_DMESG
73816 + select GRKERNSEC_CHROOT
73817 + select GRKERNSEC_CHROOT_CHDIR
73818 +
73819 + help
73820 + If you choose this option, several of the grsecurity options will
73821 + be enabled that will give you greater protection against a number
73822 + of attacks, while assuring that none of your software will have any
73823 + conflicts with the additional security measures. If you run a lot
73824 + of unusual software, or you are having problems with the higher
73825 + security levels, you should say Y here. With this option, the
73826 + following features are enabled:
73827 +
73828 + - Linking restrictions
73829 + - FIFO restrictions
73830 + - Restricted dmesg
73831 + - Enforced chdir("/") on chroot
73832 + - Runtime module disabling
73833 +
73834 +config GRKERNSEC_MEDIUM
73835 + bool "Medium"
73836 + select PAX
73837 + select PAX_EI_PAX
73838 + select PAX_PT_PAX_FLAGS
73839 + select PAX_HAVE_ACL_FLAGS
73840 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
73841 + select GRKERNSEC_CHROOT
73842 + select GRKERNSEC_CHROOT_SYSCTL
73843 + select GRKERNSEC_LINK
73844 + select GRKERNSEC_FIFO
73845 + select GRKERNSEC_DMESG
73846 + select GRKERNSEC_RANDNET
73847 + select GRKERNSEC_FORKFAIL
73848 + select GRKERNSEC_TIME
73849 + select GRKERNSEC_SIGNAL
73850 + select GRKERNSEC_CHROOT
73851 + select GRKERNSEC_CHROOT_UNIX
73852 + select GRKERNSEC_CHROOT_MOUNT
73853 + select GRKERNSEC_CHROOT_PIVOT
73854 + select GRKERNSEC_CHROOT_DOUBLE
73855 + select GRKERNSEC_CHROOT_CHDIR
73856 + select GRKERNSEC_CHROOT_MKNOD
73857 + select GRKERNSEC_PROC
73858 + select GRKERNSEC_PROC_USERGROUP
73859 + select PAX_RANDUSTACK
73860 + select PAX_ASLR
73861 + select PAX_RANDMMAP
73862 + select PAX_REFCOUNT if (X86 || SPARC64)
73863 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
73864 +
73865 + help
73866 + If you say Y here, several features in addition to those included
73867 + in the low additional security level will be enabled. These
73868 + features provide even more security to your system, though in rare
73869 + cases they may be incompatible with very old or poorly written
73870 + software. If you enable this option, make sure that your auth
73871 + service (identd) is running as gid 1001. With this option,
73872 + the following features (in addition to those provided in the
73873 + low additional security level) will be enabled:
73874 +
73875 + - Failed fork logging
73876 + - Time change logging
73877 + - Signal logging
73878 + - Deny mounts in chroot
73879 + - Deny double chrooting
73880 + - Deny sysctl writes in chroot
73881 + - Deny mknod in chroot
73882 + - Deny access to abstract AF_UNIX sockets out of chroot
73883 + - Deny pivot_root in chroot
73884 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
73885 + - /proc restrictions with special GID set to 10 (usually wheel)
73886 + - Address Space Layout Randomization (ASLR)
73887 + - Prevent exploitation of most refcount overflows
73888 + - Bounds checking of copying between the kernel and userland
73889 +
73890 +config GRKERNSEC_HIGH
73891 + bool "High"
73892 + select GRKERNSEC_LINK
73893 + select GRKERNSEC_FIFO
73894 + select GRKERNSEC_DMESG
73895 + select GRKERNSEC_FORKFAIL
73896 + select GRKERNSEC_TIME
73897 + select GRKERNSEC_SIGNAL
73898 + select GRKERNSEC_CHROOT
73899 + select GRKERNSEC_CHROOT_SHMAT
73900 + select GRKERNSEC_CHROOT_UNIX
73901 + select GRKERNSEC_CHROOT_MOUNT
73902 + select GRKERNSEC_CHROOT_FCHDIR
73903 + select GRKERNSEC_CHROOT_PIVOT
73904 + select GRKERNSEC_CHROOT_DOUBLE
73905 + select GRKERNSEC_CHROOT_CHDIR
73906 + select GRKERNSEC_CHROOT_MKNOD
73907 + select GRKERNSEC_CHROOT_CAPS
73908 + select GRKERNSEC_CHROOT_SYSCTL
73909 + select GRKERNSEC_CHROOT_FINDTASK
73910 + select GRKERNSEC_SYSFS_RESTRICT
73911 + select GRKERNSEC_PROC
73912 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
73913 + select GRKERNSEC_HIDESYM
73914 + select GRKERNSEC_BRUTE
73915 + select GRKERNSEC_PROC_USERGROUP
73916 + select GRKERNSEC_KMEM
73917 + select GRKERNSEC_RESLOG
73918 + select GRKERNSEC_RANDNET
73919 + select GRKERNSEC_PROC_ADD
73920 + select GRKERNSEC_CHROOT_CHMOD
73921 + select GRKERNSEC_CHROOT_NICE
73922 + select GRKERNSEC_SETXID
73923 + select GRKERNSEC_AUDIT_MOUNT
73924 + select GRKERNSEC_MODHARDEN if (MODULES)
73925 + select GRKERNSEC_HARDEN_PTRACE
73926 + select GRKERNSEC_PTRACE_READEXEC
73927 + select GRKERNSEC_VM86 if (X86_32)
73928 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
73929 + select PAX
73930 + select PAX_RANDUSTACK
73931 + select PAX_ASLR
73932 + select PAX_RANDMMAP
73933 + select PAX_NOEXEC
73934 + select PAX_MPROTECT
73935 + select PAX_EI_PAX
73936 + select PAX_PT_PAX_FLAGS
73937 + select PAX_HAVE_ACL_FLAGS
73938 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
73939 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
73940 + select PAX_RANDKSTACK if (X86_TSC && X86)
73941 + select PAX_SEGMEXEC if (X86_32)
73942 + select PAX_PAGEEXEC
73943 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
73944 + select PAX_EMUTRAMP if (PARISC)
73945 + select PAX_EMUSIGRT if (PARISC)
73946 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
73947 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
73948 + select PAX_REFCOUNT if (X86 || SPARC64)
73949 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
73950 + help
73951 + If you say Y here, many of the features of grsecurity will be
73952 + enabled, which will protect you against many kinds of attacks
73953 + against your system. The heightened security comes at a cost
73954 + of an increased chance of incompatibilities with rare software
73955 + on your machine. Since this security level enables PaX, you should
73956 + view <http://pax.grsecurity.net> and read about the PaX
73957 + project. While you are there, download chpax and run it on
73958 + binaries that cause problems with PaX. Also remember that
73959 + since the /proc restrictions are enabled, you must run your
73960 + identd as gid 1001. This security level enables the following
73961 + features in addition to those listed in the low and medium
73962 + security levels:
73963 +
73964 + - Additional /proc restrictions
73965 + - Chmod restrictions in chroot
73966 + - No signals, ptrace, or viewing of processes outside of chroot
73967 + - Capability restrictions in chroot
73968 + - Deny fchdir out of chroot
73969 + - Priority restrictions in chroot
73970 + - Segmentation-based implementation of PaX
73971 + - Mprotect restrictions
73972 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
73973 + - Kernel stack randomization
73974 + - Mount/unmount/remount logging
73975 + - Kernel symbol hiding
73976 + - Hardening of module auto-loading
73977 + - Ptrace restrictions
73978 + - Restricted vm86 mode
73979 + - Restricted sysfs/debugfs
73980 + - Active kernel exploit response
73981 +
73982 +config GRKERNSEC_CUSTOM
73983 + bool "Custom"
73984 + help
73985 + If you say Y here, you will be able to configure every grsecurity
73986 + option, which allows you to enable many more features that aren't
73987 + covered in the basic security levels. These additional features
73988 + include TPE, socket restrictions, and the sysctl system for
73989 + grsecurity. It is advised that you read through the help for
73990 + each option to determine its usefulness in your situation.
73991 +
73992 +endchoice
73993 +
73994 +menu "Memory Protections"
73995 +depends on GRKERNSEC
73996 +
73997 +config GRKERNSEC_KMEM
73998 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
73999 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
74000 + help
74001 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
74002 + be written to or read from to modify or leak the contents of the running
74003 + kernel. /dev/port will also not be allowed to be opened. If you have module
74004 + support disabled, enabling this will close up four ways that are
74005 + currently used to insert malicious code into the running kernel.
74006 + Even with all these features enabled, we still highly recommend that
74007 + you use the RBAC system, as it is still possible for an attacker to
74008 + modify the running kernel through privileged I/O granted by ioperm/iopl.
74009 + If you are not using XFree86, you may be able to stop this additional
74010 + case by enabling the 'Disable privileged I/O' option. Though nothing
74011 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
74012 + but only to video memory, which is the only writing we allow in this
74013 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
74014 + not be allowed to mprotect it with PROT_WRITE later.
74015 + It is highly recommended that you say Y here if you meet all the
74016 + conditions above.
74017 +
74018 +config GRKERNSEC_VM86
74019 + bool "Restrict VM86 mode"
74020 + depends on X86_32
74021 +
74022 + help
74023 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
74024 + make use of a special execution mode on 32bit x86 processors called
74025 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
74026 + video cards and will still work with this option enabled. The purpose
74027 + of the option is to prevent exploitation of emulation errors in
74028 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
74029 + Nearly all users should be able to enable this option.
74030 +
74031 +config GRKERNSEC_IO
74032 + bool "Disable privileged I/O"
74033 + depends on X86
74034 + select RTC_CLASS
74035 + select RTC_INTF_DEV
74036 + select RTC_DRV_CMOS
74037 +
74038 + help
74039 + If you say Y here, all ioperm and iopl calls will return an error.
74040 + Ioperm and iopl can be used to modify the running kernel.
74041 + Unfortunately, some programs need this access to operate properly,
74042 + the most notable of which are XFree86 and hwclock. hwclock can be
74043 + remedied by having RTC support in the kernel, so real-time
74044 + clock support is enabled if this option is enabled, to ensure
74045 + that hwclock operates correctly. XFree86 still will not
74046 + operate correctly with this option enabled, so DO NOT CHOOSE Y
74047 + IF YOU USE XFree86. If you use XFree86 and you still want to
74048 + protect your kernel against modification, use the RBAC system.
74049 +
74050 +config GRKERNSEC_PROC_MEMMAP
74051 + bool "Harden ASLR against information leaks and entropy reduction"
74052 + default y if (PAX_NOEXEC || PAX_ASLR)
74053 + depends on PAX_NOEXEC || PAX_ASLR
74054 + help
74055 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
74056 + give no information about the addresses of its mappings if
74057 + PaX features that rely on random addresses are enabled on the task.
74058 + In addition to sanitizing this information and disabling other
74059 + dangerous sources of information, this option causes reads of sensitive
74060 + /proc/<pid> entries where the file descriptor was opened in a different
74061 + task than the one performing the read. Such attempts are logged.
74062 + This option also limits argv/env strings for suid/sgid binaries
74063 + to 512KB to prevent a complete exhaustion of the stack entropy provided
74064 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
74065 + binaries to prevent alternative mmap layouts from being abused.
74066 +
74067 + If you use PaX it is essential that you say Y here as it closes up
74068 + several holes that make full ASLR useless locally.
74069 +
74070 +config GRKERNSEC_BRUTE
74071 + bool "Deter exploit bruteforcing"
74072 + help
74073 + If you say Y here, attempts to bruteforce exploits against forking
74074 + daemons such as apache or sshd, as well as against suid/sgid binaries
74075 + will be deterred. When a child of a forking daemon is killed by PaX
74076 + or crashes due to an illegal instruction or other suspicious signal,
74077 + the parent process will be delayed 30 seconds upon every subsequent
74078 + fork until the administrator is able to assess the situation and
74079 + restart the daemon.
74080 + In the suid/sgid case, the attempt is logged, the user has all their
74081 + processes terminated, and they are prevented from executing any further
74082 + processes for 15 minutes.
74083 + It is recommended that you also enable signal logging in the auditing
74084 + section so that logs are generated when a process triggers a suspicious
74085 + signal.
74086 + If the sysctl option is enabled, a sysctl option with name
74087 + "deter_bruteforce" is created.
74088 +
74089 +config GRKERNSEC_MODHARDEN
74090 + bool "Harden module auto-loading"
74091 + depends on MODULES
74092 + help
74093 + If you say Y here, module auto-loading in response to use of some
74094 + feature implemented by an unloaded module will be restricted to
74095 + root users. Enabling this option helps defend against attacks
74096 + by unprivileged users who abuse the auto-loading behavior to
74097 + cause a vulnerable module to load that is then exploited.
74098 +
74099 + If this option prevents a legitimate use of auto-loading for a
74100 + non-root user, the administrator can execute modprobe manually
74101 + with the exact name of the module mentioned in the alert log.
74102 + Alternatively, the administrator can add the module to the list
74103 + of modules loaded at boot by modifying init scripts.
74104 +
74105 + Modification of init scripts will most likely be needed on
74106 + Ubuntu servers with encrypted home directory support enabled,
74107 + as the first non-root user logging in will cause the ecb(aes),
74108 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
74109 +
74110 +config GRKERNSEC_HIDESYM
74111 + bool "Hide kernel symbols"
74112 + help
74113 + If you say Y here, getting information on loaded modules, and
74114 + displaying all kernel symbols through a syscall will be restricted
74115 + to users with CAP_SYS_MODULE. For software compatibility reasons,
74116 + /proc/kallsyms will be restricted to the root user. The RBAC
74117 + system can hide that entry even from root.
74118 +
74119 + This option also prevents leaking of kernel addresses through
74120 + several /proc entries.
74121 +
74122 + Note that this option is only effective provided the following
74123 + conditions are met:
74124 + 1) The kernel using grsecurity is not precompiled by some distribution
74125 + 2) You have also enabled GRKERNSEC_DMESG
74126 + 3) You are using the RBAC system and hiding other files such as your
74127 + kernel image and System.map. Alternatively, enabling this option
74128 + causes the permissions on /boot, /lib/modules, and the kernel
74129 + source directory to change at compile time to prevent
74130 + reading by non-root users.
74131 + If the above conditions are met, this option will aid in providing a
74132 + useful protection against local kernel exploitation of overflows
74133 + and arbitrary read/write vulnerabilities.
74134 +
74135 +config GRKERNSEC_KERN_LOCKOUT
74136 + bool "Active kernel exploit response"
74137 + depends on X86 || ARM || PPC || SPARC
74138 + help
74139 + If you say Y here, when a PaX alert is triggered due to suspicious
74140 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
74141 + or an OOPs occurs due to bad memory accesses, instead of just
74142 + terminating the offending process (and potentially allowing
74143 + a subsequent exploit from the same user), we will take one of two
74144 + actions:
74145 + If the user was root, we will panic the system
74146 + If the user was non-root, we will log the attempt, terminate
74147 + all processes owned by the user, then prevent them from creating
74148 + any new processes until the system is restarted
74149 + This deters repeated kernel exploitation/bruteforcing attempts
74150 + and is useful for later forensics.
74151 +
74152 +endmenu
74153 +menu "Role Based Access Control Options"
74154 +depends on GRKERNSEC
74155 +
74156 +config GRKERNSEC_RBAC_DEBUG
74157 + bool
74158 +
74159 +config GRKERNSEC_NO_RBAC
74160 + bool "Disable RBAC system"
74161 + help
74162 + If you say Y here, the /dev/grsec device will be removed from the kernel,
74163 + preventing the RBAC system from being enabled. You should only say Y
74164 + here if you have no intention of using the RBAC system, so as to prevent
74165 + an attacker with root access from misusing the RBAC system to hide files
74166 + and processes when loadable module support and /dev/[k]mem have been
74167 + locked down.
74168 +
74169 +config GRKERNSEC_ACL_HIDEKERN
74170 + bool "Hide kernel processes"
74171 + help
74172 + If you say Y here, all kernel threads will be hidden to all
74173 + processes but those whose subject has the "view hidden processes"
74174 + flag.
74175 +
74176 +config GRKERNSEC_ACL_MAXTRIES
74177 + int "Maximum tries before password lockout"
74178 + default 3
74179 + help
74180 + This option enforces the maximum number of times a user can attempt
74181 + to authorize themselves with the grsecurity RBAC system before being
74182 + denied the ability to attempt authorization again for a specified time.
74183 + The lower the number, the harder it will be to brute-force a password.
74184 +
74185 +config GRKERNSEC_ACL_TIMEOUT
74186 + int "Time to wait after max password tries, in seconds"
74187 + default 30
74188 + help
74189 + This option specifies the time the user must wait after attempting to
74190 + authorize to the RBAC system with the maximum number of invalid
74191 + passwords. The higher the number, the harder it will be to brute-force
74192 + a password.
74193 +
74194 +endmenu
74195 +menu "Filesystem Protections"
74196 +depends on GRKERNSEC
74197 +
74198 +config GRKERNSEC_PROC
74199 + bool "Proc restrictions"
74200 + help
74201 + If you say Y here, the permissions of the /proc filesystem
74202 + will be altered to enhance system security and privacy. You MUST
74203 + choose either a user only restriction or a user and group restriction.
74204 + Depending upon the option you choose, you can either restrict users to
74205 + see only the processes they themselves run, or choose a group that can
74206 + view all processes and files normally restricted to root if you choose
74207 + the "restrict to user only" option. NOTE: If you're running identd or
74208 + ntpd as a non-root user, you will have to run it as the group you
74209 + specify here.
74210 +
74211 +config GRKERNSEC_PROC_USER
74212 + bool "Restrict /proc to user only"
74213 + depends on GRKERNSEC_PROC
74214 + help
74215 + If you say Y here, non-root users will only be able to view their own
74216 + processes, and restricts them from viewing network-related information,
74217 + and viewing kernel symbol and module information.
74218 +
74219 +config GRKERNSEC_PROC_USERGROUP
74220 + bool "Allow special group"
74221 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
74222 + help
74223 + If you say Y here, you will be able to select a group that will be
74224 + able to view all processes and network-related information. If you've
74225 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
74226 + remain hidden. This option is useful if you want to run identd as
74227 + a non-root user.
74228 +
74229 +config GRKERNSEC_PROC_GID
74230 + int "GID for special group"
74231 + depends on GRKERNSEC_PROC_USERGROUP
74232 + default 1001
74233 +
74234 +config GRKERNSEC_PROC_ADD
74235 + bool "Additional restrictions"
74236 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
74237 + help
74238 + If you say Y here, additional restrictions will be placed on
74239 + /proc that keep normal users from viewing device information and
74240 + slabinfo information that could be useful for exploits.
74241 +
74242 +config GRKERNSEC_LINK
74243 + bool "Linking restrictions"
74244 + help
74245 + If you say Y here, /tmp race exploits will be prevented, since users
74246 + will no longer be able to follow symlinks owned by other users in
74247 + world-writable +t directories (e.g. /tmp), unless the owner of the
74248 + symlink is the owner of the directory. Users will also not be
74249 + able to hardlink to files they do not own. If the sysctl option is
74250 + enabled, a sysctl option with name "linking_restrictions" is created.
74251 +
74252 +config GRKERNSEC_FIFO
74253 + bool "FIFO restrictions"
74254 + help
74255 + If you say Y here, users will not be able to write to FIFOs they don't
74256 + own in world-writable +t directories (e.g. /tmp), unless the owner of
74257 + the FIFO is the same owner of the directory it's held in. If the sysctl
74258 + option is enabled, a sysctl option with name "fifo_restrictions" is
74259 + created.
74260 +
74261 +config GRKERNSEC_SYSFS_RESTRICT
74262 + bool "Sysfs/debugfs restriction"
74263 + depends on SYSFS
74264 + help
74265 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
74266 + any filesystem normally mounted under it (e.g. debugfs) will be
74267 + mostly accessible only by root. These filesystems generally provide access
74268 + to hardware and debug information that isn't appropriate for unprivileged
74269 + users of the system. Sysfs and debugfs have also become a large source
74270 + of new vulnerabilities, ranging from infoleaks to local compromise.
74271 + There has been very little oversight with an eye toward security involved
74272 + in adding new exporters of information to these filesystems, so their
74273 + use is discouraged.
74274 + For reasons of compatibility, a few directories have been whitelisted
74275 + for access by non-root users:
74276 + /sys/fs/selinux
74277 + /sys/fs/fuse
74278 + /sys/devices/system/cpu
74279 +
74280 +config GRKERNSEC_ROFS
74281 + bool "Runtime read-only mount protection"
74282 + help
74283 + If you say Y here, a sysctl option with name "romount_protect" will
74284 + be created. By setting this option to 1 at runtime, filesystems
74285 + will be protected in the following ways:
74286 + * No new writable mounts will be allowed
74287 + * Existing read-only mounts won't be able to be remounted read/write
74288 + * Write operations will be denied on all block devices
74289 + This option acts independently of grsec_lock: once it is set to 1,
74290 + it cannot be turned off. Therefore, please be mindful of the resulting
74291 + behavior if this option is enabled in an init script on a read-only
74292 + filesystem. This feature is mainly intended for secure embedded systems.
74293 +
74294 +config GRKERNSEC_CHROOT
74295 + bool "Chroot jail restrictions"
74296 + help
74297 + If you say Y here, you will be able to choose several options that will
74298 + make breaking out of a chrooted jail much more difficult. If you
74299 + encounter no software incompatibilities with the following options, it
74300 + is recommended that you enable each one.
74301 +
74302 +config GRKERNSEC_CHROOT_MOUNT
74303 + bool "Deny mounts"
74304 + depends on GRKERNSEC_CHROOT
74305 + help
74306 + If you say Y here, processes inside a chroot will not be able to
74307 + mount or remount filesystems. If the sysctl option is enabled, a
74308 + sysctl option with name "chroot_deny_mount" is created.
74309 +
74310 +config GRKERNSEC_CHROOT_DOUBLE
74311 + bool "Deny double-chroots"
74312 + depends on GRKERNSEC_CHROOT
74313 + help
74314 + If you say Y here, processes inside a chroot will not be able to chroot
74315 + again outside the chroot. This is a widely used method of breaking
74316 + out of a chroot jail and should not be allowed. If the sysctl
74317 + option is enabled, a sysctl option with name
74318 + "chroot_deny_chroot" is created.
74319 +
74320 +config GRKERNSEC_CHROOT_PIVOT
74321 + bool "Deny pivot_root in chroot"
74322 + depends on GRKERNSEC_CHROOT
74323 + help
74324 + If you say Y here, processes inside a chroot will not be able to use
74325 + a function called pivot_root() that was introduced in Linux 2.3.41. It
74326 + works similar to chroot in that it changes the root filesystem. This
74327 + function could be misused in a chrooted process to attempt to break out
74328 + of the chroot, and therefore should not be allowed. If the sysctl
74329 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
74330 + created.
74331 +
74332 +config GRKERNSEC_CHROOT_CHDIR
74333 + bool "Enforce chdir(\"/\") on all chroots"
74334 + depends on GRKERNSEC_CHROOT
74335 + help
74336 + If you say Y here, the current working directory of all newly-chrooted
74337 + applications will be set to the root directory of the chroot.
74338 + The man page on chroot(2) states:
74339 + Note that this call does not change the current working
74340 + directory, so that `.' can be outside the tree rooted at
74341 + `/'. In particular, the super-user can escape from a
74342 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
74343 +
74344 + It is recommended that you say Y here, since it's not known to break
74345 + any software. If the sysctl option is enabled, a sysctl option with
74346 + name "chroot_enforce_chdir" is created.
74347 +
74348 +config GRKERNSEC_CHROOT_CHMOD
74349 + bool "Deny (f)chmod +s"
74350 + depends on GRKERNSEC_CHROOT
74351 + help
74352 + If you say Y here, processes inside a chroot will not be able to chmod
74353 + or fchmod files to make them have suid or sgid bits. This protects
74354 + against another published method of breaking a chroot. If the sysctl
74355 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
74356 + created.
74357 +
74358 +config GRKERNSEC_CHROOT_FCHDIR
74359 + bool "Deny fchdir out of chroot"
74360 + depends on GRKERNSEC_CHROOT
74361 + help
74362 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
74363 + to a file descriptor of the chrooting process that points to a directory
74364 + outside the filesystem will be stopped. If the sysctl option
74365 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
74366 +
74367 +config GRKERNSEC_CHROOT_MKNOD
74368 + bool "Deny mknod"
74369 + depends on GRKERNSEC_CHROOT
74370 + help
74371 + If you say Y here, processes inside a chroot will not be allowed to
74372 + mknod. The problem with using mknod inside a chroot is that it
74373 + would allow an attacker to create a device entry that is the same
74374 + as one on the physical root of your system, which could be
74375 + anything from the console device to a device for your hard drive (which
74376 + they could then use to wipe the drive or steal data). It is recommended
74377 + that you say Y here, unless you run into software incompatibilities.
74378 + If the sysctl option is enabled, a sysctl option with name
74379 + "chroot_deny_mknod" is created.
74380 +
74381 +config GRKERNSEC_CHROOT_SHMAT
74382 + bool "Deny shmat() out of chroot"
74383 + depends on GRKERNSEC_CHROOT
74384 + help
74385 + If you say Y here, processes inside a chroot will not be able to attach
74386 + to shared memory segments that were created outside of the chroot jail.
74387 + It is recommended that you say Y here. If the sysctl option is enabled,
74388 + a sysctl option with name "chroot_deny_shmat" is created.
74389 +
74390 +config GRKERNSEC_CHROOT_UNIX
74391 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
74392 + depends on GRKERNSEC_CHROOT
74393 + help
74394 + If you say Y here, processes inside a chroot will not be able to
74395 + connect to abstract (meaning not belonging to a filesystem) Unix
74396 + domain sockets that were bound outside of a chroot. It is recommended
74397 + that you say Y here. If the sysctl option is enabled, a sysctl option
74398 + with name "chroot_deny_unix" is created.
74399 +
74400 +config GRKERNSEC_CHROOT_FINDTASK
74401 + bool "Protect outside processes"
74402 + depends on GRKERNSEC_CHROOT
74403 + help
74404 + If you say Y here, processes inside a chroot will not be able to
74405 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
74406 + getsid, or view any process outside of the chroot. If the sysctl
74407 + option is enabled, a sysctl option with name "chroot_findtask" is
74408 + created.
74409 +
74410 +config GRKERNSEC_CHROOT_NICE
74411 + bool "Restrict priority changes"
74412 + depends on GRKERNSEC_CHROOT
74413 + help
74414 + If you say Y here, processes inside a chroot will not be able to raise
74415 + the priority of processes in the chroot, or alter the priority of
74416 + processes outside the chroot. This provides more security than simply
74417 + removing CAP_SYS_NICE from the process' capability set. If the
74418 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
74419 + is created.
74420 +
74421 +config GRKERNSEC_CHROOT_SYSCTL
74422 + bool "Deny sysctl writes"
74423 + depends on GRKERNSEC_CHROOT
74424 + help
74425 + If you say Y here, an attacker in a chroot will not be able to
74426 + write to sysctl entries, either by sysctl(2) or through a /proc
74427 + interface. It is strongly recommended that you say Y here. If the
74428 + sysctl option is enabled, a sysctl option with name
74429 + "chroot_deny_sysctl" is created.
74430 +
74431 +config GRKERNSEC_CHROOT_CAPS
74432 + bool "Capability restrictions"
74433 + depends on GRKERNSEC_CHROOT
74434 + help
74435 + If you say Y here, the capabilities on all processes within a
74436 + chroot jail will be lowered to stop module insertion, raw i/o,
74437 + system and net admin tasks, rebooting the system, modifying immutable
74438 + files, modifying IPC owned by another, and changing the system time.
74439 + This is left an option because it can break some apps. Disable this
74440 + if your chrooted apps are having problems performing those kinds of
74441 + tasks. If the sysctl option is enabled, a sysctl option with
74442 + name "chroot_caps" is created.
74443 +
74444 +endmenu
74445 +menu "Kernel Auditing"
74446 +depends on GRKERNSEC
74447 +
74448 +config GRKERNSEC_AUDIT_GROUP
74449 + bool "Single group for auditing"
74450 + help
74451 + If you say Y here, the exec, chdir, and (un)mount logging features
74452 + will only operate on a group you specify. This option is recommended
74453 + if you only want to watch certain users instead of having a large
74454 + amount of logs from the entire system. If the sysctl option is enabled,
74455 + a sysctl option with name "audit_group" is created.
74456 +
74457 +config GRKERNSEC_AUDIT_GID
74458 + int "GID for auditing"
74459 + depends on GRKERNSEC_AUDIT_GROUP
74460 + default 1007
74461 +
74462 +config GRKERNSEC_EXECLOG
74463 + bool "Exec logging"
74464 + help
74465 + If you say Y here, all execve() calls will be logged (since the
74466 + other exec*() calls are frontends to execve(), all execution
74467 + will be logged). Useful for shell-servers that like to keep track
74468 + of their users. If the sysctl option is enabled, a sysctl option with
74469 + name "exec_logging" is created.
74470 + WARNING: This option when enabled will produce a LOT of logs, especially
74471 + on an active system.
74472 +
74473 +config GRKERNSEC_RESLOG
74474 + bool "Resource logging"
74475 + help
74476 + If you say Y here, all attempts to overstep resource limits will
74477 + be logged with the resource name, the requested size, and the current
74478 + limit. It is highly recommended that you say Y here. If the sysctl
74479 + option is enabled, a sysctl option with name "resource_logging" is
74480 + created. If the RBAC system is enabled, the sysctl value is ignored.
74481 +
74482 +config GRKERNSEC_CHROOT_EXECLOG
74483 + bool "Log execs within chroot"
74484 + help
74485 + If you say Y here, all executions inside a chroot jail will be logged
74486 + to syslog. This can cause a large amount of logs if certain
74487 + applications (eg. djb's daemontools) are installed on the system, and
74488 + is therefore left as an option. If the sysctl option is enabled, a
74489 + sysctl option with name "chroot_execlog" is created.
74490 +
74491 +config GRKERNSEC_AUDIT_PTRACE
74492 + bool "Ptrace logging"
74493 + help
74494 + If you say Y here, all attempts to attach to a process via ptrace
74495 + will be logged. If the sysctl option is enabled, a sysctl option
74496 + with name "audit_ptrace" is created.
74497 +
74498 +config GRKERNSEC_AUDIT_CHDIR
74499 + bool "Chdir logging"
74500 + help
74501 + If you say Y here, all chdir() calls will be logged. If the sysctl
74502 + option is enabled, a sysctl option with name "audit_chdir" is created.
74503 +
74504 +config GRKERNSEC_AUDIT_MOUNT
74505 + bool "(Un)Mount logging"
74506 + help
74507 + If you say Y here, all mounts and unmounts will be logged. If the
74508 + sysctl option is enabled, a sysctl option with name "audit_mount" is
74509 + created.
74510 +
74511 +config GRKERNSEC_SIGNAL
74512 + bool "Signal logging"
74513 + help
74514 + If you say Y here, certain important signals will be logged, such as
74515 + SIGSEGV, which will as a result inform you of when an error in a program
74516 + occurred, which in some cases could mean a possible exploit attempt.
74517 + If the sysctl option is enabled, a sysctl option with name
74518 + "signal_logging" is created.
74519 +
74520 +config GRKERNSEC_FORKFAIL
74521 + bool "Fork failure logging"
74522 + help
74523 + If you say Y here, all failed fork() attempts will be logged.
74524 + This could suggest a fork bomb, or someone attempting to overstep
74525 + their process limit. If the sysctl option is enabled, a sysctl option
74526 + with name "forkfail_logging" is created.
74527 +
74528 +config GRKERNSEC_TIME
74529 + bool "Time change logging"
74530 + help
74531 + If you say Y here, any changes of the system clock will be logged.
74532 + If the sysctl option is enabled, a sysctl option with name
74533 + "timechange_logging" is created.
74534 +
74535 +config GRKERNSEC_PROC_IPADDR
74536 + bool "/proc/<pid>/ipaddr support"
74537 + help
74538 + If you say Y here, a new entry will be added to each /proc/<pid>
74539 + directory that contains the IP address of the person using the task.
74540 + The IP is carried across local TCP and AF_UNIX stream sockets.
74541 + This information can be useful for IDS/IPSes to perform remote response
74542 + to a local attack. The entry is readable by only the owner of the
74543 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
74544 + the RBAC system), and thus does not create privacy concerns.
74545 +
74546 +config GRKERNSEC_RWXMAP_LOG
74547 + bool 'Denied RWX mmap/mprotect logging'
74548 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
74549 + help
74550 + If you say Y here, calls to mmap() and mprotect() with explicit
74551 + usage of PROT_WRITE and PROT_EXEC together will be logged when
74552 + denied by the PAX_MPROTECT feature. If the sysctl option is
74553 + enabled, a sysctl option with name "rwxmap_logging" is created.
74554 +
74555 +config GRKERNSEC_AUDIT_TEXTREL
74556 + bool 'ELF text relocations logging (READ HELP)'
74557 + depends on PAX_MPROTECT
74558 + help
74559 + If you say Y here, text relocations will be logged with the filename
74560 + of the offending library or binary. The purpose of the feature is
74561 + to help Linux distribution developers get rid of libraries and
74562 + binaries that need text relocations which hinder the future progress
74563 + of PaX. Only Linux distribution developers should say Y here, and
74564 + never on a production machine, as this option creates an information
74565 + leak that could aid an attacker in defeating the randomization of
74566 + a single memory region. If the sysctl option is enabled, a sysctl
74567 + option with name "audit_textrel" is created.
74568 +
74569 +endmenu
74570 +
74571 +menu "Executable Protections"
74572 +depends on GRKERNSEC
74573 +
74574 +config GRKERNSEC_DMESG
74575 + bool "Dmesg(8) restriction"
74576 + help
74577 + If you say Y here, non-root users will not be able to use dmesg(8)
74578 + to view up to the last 4kb of messages in the kernel's log buffer.
74579 + The kernel's log buffer often contains kernel addresses and other
74580 + identifying information useful to an attacker in fingerprinting a
74581 + system for a targeted exploit.
74582 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
74583 + created.
74584 +
74585 +config GRKERNSEC_HARDEN_PTRACE
74586 + bool "Deter ptrace-based process snooping"
74587 + help
74588 + If you say Y here, TTY sniffers and other malicious monitoring
74589 + programs implemented through ptrace will be defeated. If you
74590 + have been using the RBAC system, this option has already been
74591 + enabled for several years for all users, with the ability to make
74592 + fine-grained exceptions.
74593 +
74594 + This option only affects the ability of non-root users to ptrace
74595 + processes that are not a descendent of the ptracing process.
74596 + This means that strace ./binary and gdb ./binary will still work,
74597 + but attaching to arbitrary processes will not. If the sysctl
74598 + option is enabled, a sysctl option with name "harden_ptrace" is
74599 + created.
74600 +
74601 +config GRKERNSEC_PTRACE_READEXEC
74602 + bool "Require read access to ptrace sensitive binaries"
74603 + help
74604 + If you say Y here, unprivileged users will not be able to ptrace unreadable
74605 + binaries. This option is useful in environments that
74606 + remove the read bits (e.g. file mode 4711) from suid binaries to
74607 + prevent infoleaking of their contents. This option adds
74608 + consistency to the use of that file mode, as the binary could normally
74609 + be read out when run without privileges while ptracing.
74610 +
74611 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
74612 + is created.
74613 +
74614 +config GRKERNSEC_SETXID
74615 + bool "Enforce consistent multithreaded privileges"
74616 + help
74617 + If you say Y here, a change from a root uid to a non-root uid
74618 + in a multithreaded application will cause the resulting uids,
74619 + gids, supplementary groups, and capabilities in that thread
74620 + to be propagated to the other threads of the process. In most
74621 + cases this is unnecessary, as glibc will emulate this behavior
74622 + on behalf of the application. Other libcs do not act in the
74623 + same way, allowing the other threads of the process to continue
74624 + running with root privileges. If the sysctl option is enabled,
74625 + a sysctl option with name "consistent_setxid" is created.
74626 +
74627 +config GRKERNSEC_TPE
74628 + bool "Trusted Path Execution (TPE)"
74629 + help
74630 + If you say Y here, you will be able to choose a gid to add to the
74631 + supplementary groups of users you want to mark as "untrusted."
74632 + These users will not be able to execute any files that are not in
74633 + root-owned directories writable only by root. If the sysctl option
74634 + is enabled, a sysctl option with name "tpe" is created.
74635 +
74636 +config GRKERNSEC_TPE_ALL
74637 + bool "Partially restrict all non-root users"
74638 + depends on GRKERNSEC_TPE
74639 + help
74640 + If you say Y here, all non-root users will be covered under
74641 + a weaker TPE restriction. This is separate from, and in addition to,
74642 + the main TPE options that you have selected elsewhere. Thus, if a
74643 + "trusted" GID is chosen, this restriction applies to even that GID.
74644 + Under this restriction, all non-root users will only be allowed to
74645 + execute files in directories they own that are not group or
74646 + world-writable, or in directories owned by root and writable only by
74647 + root. If the sysctl option is enabled, a sysctl option with name
74648 + "tpe_restrict_all" is created.
74649 +
74650 +config GRKERNSEC_TPE_INVERT
74651 + bool "Invert GID option"
74652 + depends on GRKERNSEC_TPE
74653 + help
74654 + If you say Y here, the group you specify in the TPE configuration will
74655 + decide what group TPE restrictions will be *disabled* for. This
74656 + option is useful if you want TPE restrictions to be applied to most
74657 + users on the system. If the sysctl option is enabled, a sysctl option
74658 + with name "tpe_invert" is created. Unlike other sysctl options, this
74659 + entry will default to on for backward-compatibility.
74660 +
74661 +config GRKERNSEC_TPE_GID
74662 + int "GID for untrusted users"
74663 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
74664 + default 1005
74665 + help
74666 + Setting this GID determines what group TPE restrictions will be
74667 + *enabled* for. If the sysctl option is enabled, a sysctl option
74668 + with name "tpe_gid" is created.
74669 +
74670 +config GRKERNSEC_TPE_GID
74671 + int "GID for trusted users"
74672 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
74673 + default 1005
74674 + help
74675 + Setting this GID determines what group TPE restrictions will be
74676 + *disabled* for. If the sysctl option is enabled, a sysctl option
74677 + with name "tpe_gid" is created.
74678 +
74679 +endmenu
74680 +menu "Network Protections"
74681 +depends on GRKERNSEC
74682 +
74683 +config GRKERNSEC_RANDNET
74684 + bool "Larger entropy pools"
74685 + help
74686 + If you say Y here, the entropy pools used for many features of Linux
74687 + and grsecurity will be doubled in size. Since several grsecurity
74688 + features use additional randomness, it is recommended that you say Y
74689 + here. Saying Y here has a similar effect as modifying
74690 + /proc/sys/kernel/random/poolsize.
74691 +
74692 +config GRKERNSEC_BLACKHOLE
74693 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
74694 + depends on NET
74695 + help
74696 + If you say Y here, neither TCP resets nor ICMP
74697 + destination-unreachable packets will be sent in response to packets
74698 + sent to ports for which no associated listening process exists.
74699 + This feature supports both IPV4 and IPV6 and exempts the
74700 + loopback interface from blackholing. Enabling this feature
74701 + makes a host more resilient to DoS attacks and reduces network
74702 + visibility against scanners.
74703 +
74704 + The blackhole feature as-implemented is equivalent to the FreeBSD
74705 + blackhole feature, as it prevents RST responses to all packets, not
74706 + just SYNs. Under most application behavior this causes no
74707 + problems, but applications (like haproxy) may not close certain
74708 + connections in a way that cleanly terminates them on the remote
74709 + end, leaving the remote host in LAST_ACK state. Because of this
74710 + side-effect and to prevent intentional LAST_ACK DoSes, this
74711 + feature also adds automatic mitigation against such attacks.
74712 + The mitigation drastically reduces the amount of time a socket
74713 + can spend in LAST_ACK state. If you're using haproxy and not
74714 + all servers it connects to have this option enabled, consider
74715 + disabling this feature on the haproxy host.
74716 +
74717 + If the sysctl option is enabled, two sysctl options with names
74718 + "ip_blackhole" and "lastack_retries" will be created.
74719 + While "ip_blackhole" takes the standard zero/non-zero on/off
74720 + toggle, "lastack_retries" uses the same kinds of values as
74721 + "tcp_retries1" and "tcp_retries2". The default value of 4
74722 + prevents a socket from lasting more than 45 seconds in LAST_ACK
74723 + state.
74724 +
74725 +config GRKERNSEC_SOCKET
74726 + bool "Socket restrictions"
74727 + depends on NET
74728 + help
74729 + If you say Y here, you will be able to choose from several options.
74730 + If you assign a GID on your system and add it to the supplementary
74731 + groups of users you want to restrict socket access to, this patch
74732 + will perform up to three things, based on the option(s) you choose.
74733 +
74734 +config GRKERNSEC_SOCKET_ALL
74735 + bool "Deny any sockets to group"
74736 + depends on GRKERNSEC_SOCKET
74737 + help
74738 + If you say Y here, you will be able to choose a GID whose users will
74739 + be unable to connect to other hosts from your machine or run server
74740 + applications from your machine. If the sysctl option is enabled, a
74741 + sysctl option with name "socket_all" is created.
74742 +
74743 +config GRKERNSEC_SOCKET_ALL_GID
74744 + int "GID to deny all sockets for"
74745 + depends on GRKERNSEC_SOCKET_ALL
74746 + default 1004
74747 + help
74748 + Here you can choose the GID to disable socket access for. Remember to
74749 + add the users you want socket access disabled for to the GID
74750 + specified here. If the sysctl option is enabled, a sysctl option
74751 + with name "socket_all_gid" is created.
74752 +
74753 +config GRKERNSEC_SOCKET_CLIENT
74754 + bool "Deny client sockets to group"
74755 + depends on GRKERNSEC_SOCKET
74756 + help
74757 + If you say Y here, you will be able to choose a GID whose users will
74758 + be unable to connect to other hosts from your machine, but will be
74759 + able to run servers. If this option is enabled, all users in the group
74760 + you specify will have to use passive mode when initiating ftp transfers
74761 + from the shell on your machine. If the sysctl option is enabled, a
74762 + sysctl option with name "socket_client" is created.
74763 +
74764 +config GRKERNSEC_SOCKET_CLIENT_GID
74765 + int "GID to deny client sockets for"
74766 + depends on GRKERNSEC_SOCKET_CLIENT
74767 + default 1003
74768 + help
74769 + Here you can choose the GID to disable client socket access for.
74770 + Remember to add the users you want client socket access disabled for to
74771 + the GID specified here. If the sysctl option is enabled, a sysctl
74772 + option with name "socket_client_gid" is created.
74773 +
74774 +config GRKERNSEC_SOCKET_SERVER
74775 + bool "Deny server sockets to group"
74776 + depends on GRKERNSEC_SOCKET
74777 + help
74778 + If you say Y here, you will be able to choose a GID of whose users will
74779 + be unable to run server applications from your machine. If the sysctl
74780 + option is enabled, a sysctl option with name "socket_server" is created.
74781 +
74782 +config GRKERNSEC_SOCKET_SERVER_GID
74783 + int "GID to deny server sockets for"
74784 + depends on GRKERNSEC_SOCKET_SERVER
74785 + default 1002
74786 + help
74787 + Here you can choose the GID to disable server socket access for.
74788 + Remember to add the users you want server socket access disabled for to
74789 + the GID specified here. If the sysctl option is enabled, a sysctl
74790 + option with name "socket_server_gid" is created.
74791 +
74792 +endmenu
74793 +menu "Sysctl support"
74794 +depends on GRKERNSEC && SYSCTL
74795 +
74796 +config GRKERNSEC_SYSCTL
74797 + bool "Sysctl support"
74798 + help
74799 + If you say Y here, you will be able to change the options that
74800 + grsecurity runs with at bootup, without having to recompile your
74801 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
74802 + to enable (1) or disable (0) various features. All the sysctl entries
74803 + are mutable until the "grsec_lock" entry is set to a non-zero value.
74804 + All features enabled in the kernel configuration are disabled at boot
74805 + if you do not say Y to the "Turn on features by default" option.
74806 + All options should be set at startup, and the grsec_lock entry should
74807 + be set to a non-zero value after all the options are set.
74808 + *THIS IS EXTREMELY IMPORTANT*
74809 +
74810 +config GRKERNSEC_SYSCTL_DISTRO
74811 + bool "Extra sysctl support for distro makers (READ HELP)"
74812 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
74813 + help
74814 + If you say Y here, additional sysctl options will be created
74815 + for features that affect processes running as root. Therefore,
74816 + it is critical when using this option that the grsec_lock entry be
74817 + enabled after boot. Only distros with prebuilt kernel packages
74818 + with this option enabled that can ensure grsec_lock is enabled
74819 + after boot should use this option.
74820 + *Failure to set grsec_lock after boot makes all grsec features
74821 + this option covers useless*
74822 +
74823 + Currently this option creates the following sysctl entries:
74824 + "Disable Privileged I/O": "disable_priv_io"
74825 +
74826 +config GRKERNSEC_SYSCTL_ON
74827 + bool "Turn on features by default"
74828 + depends on GRKERNSEC_SYSCTL
74829 + help
74830 + If you say Y here, instead of having all features enabled in the
74831 + kernel configuration disabled at boot time, the features will be
74832 + enabled at boot time. It is recommended you say Y here unless
74833 + there is some reason you would want all sysctl-tunable features to
74834 + be disabled by default. As mentioned elsewhere, it is important
74835 + to enable the grsec_lock entry once you have finished modifying
74836 + the sysctl entries.
74837 +
74838 +endmenu
74839 +menu "Logging Options"
74840 +depends on GRKERNSEC
74841 +
74842 +config GRKERNSEC_FLOODTIME
74843 + int "Seconds in between log messages (minimum)"
74844 + default 10
74845 + help
74846 + This option allows you to enforce the number of seconds between
74847 + grsecurity log messages. The default should be suitable for most
74848 + people, however, if you choose to change it, choose a value small enough
74849 + to allow informative logs to be produced, but large enough to
74850 + prevent flooding.
74851 +
74852 +config GRKERNSEC_FLOODBURST
74853 + int "Number of messages in a burst (maximum)"
74854 + default 6
74855 + help
74856 + This option allows you to choose the maximum number of messages allowed
74857 + within the flood time interval you chose in a separate option. The
74858 + default should be suitable for most people, however if you find that
74859 + many of your logs are being interpreted as flooding, you may want to
74860 + raise this value.
74861 +
74862 +endmenu
74863 +
74864 +endmenu
74865 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
74866 new file mode 100644
74867 index 0000000..1b9afa9
74868 --- /dev/null
74869 +++ b/grsecurity/Makefile
74870 @@ -0,0 +1,38 @@
74871 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
74872 +# during 2001-2009 it has been completely redesigned by Brad Spengler
74873 +# into an RBAC system
74874 +#
74875 +# All code in this directory and various hooks inserted throughout the kernel
74876 +# are copyright Brad Spengler - Open Source Security, Inc., and released
74877 +# under the GPL v2 or higher
74878 +
74879 +KBUILD_CFLAGS += -Werror
74880 +
74881 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
74882 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
74883 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
74884 +
74885 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
74886 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
74887 + gracl_learn.o grsec_log.o
74888 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
74889 +
74890 +ifdef CONFIG_NET
74891 +obj-y += grsec_sock.o
74892 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
74893 +endif
74894 +
74895 +ifndef CONFIG_GRKERNSEC
74896 +obj-y += grsec_disabled.o
74897 +endif
74898 +
74899 +ifdef CONFIG_GRKERNSEC_HIDESYM
74900 +extra-y := grsec_hidesym.o
74901 +$(obj)/grsec_hidesym.o:
74902 + @-chmod -f 500 /boot
74903 + @-chmod -f 500 /lib/modules
74904 + @-chmod -f 500 /lib64/modules
74905 + @-chmod -f 500 /lib32/modules
74906 + @-chmod -f 700 .
74907 + @echo ' grsec: protected kernel image paths'
74908 +endif
74909 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
74910 new file mode 100644
74911 index 0000000..dc4812b
74912 --- /dev/null
74913 +++ b/grsecurity/gracl.c
74914 @@ -0,0 +1,4148 @@
74915 +#include <linux/kernel.h>
74916 +#include <linux/module.h>
74917 +#include <linux/sched.h>
74918 +#include <linux/mm.h>
74919 +#include <linux/file.h>
74920 +#include <linux/fs.h>
74921 +#include <linux/namei.h>
74922 +#include <linux/mount.h>
74923 +#include <linux/tty.h>
74924 +#include <linux/proc_fs.h>
74925 +#include <linux/smp_lock.h>
74926 +#include <linux/slab.h>
74927 +#include <linux/vmalloc.h>
74928 +#include <linux/types.h>
74929 +#include <linux/sysctl.h>
74930 +#include <linux/netdevice.h>
74931 +#include <linux/ptrace.h>
74932 +#include <linux/gracl.h>
74933 +#include <linux/gralloc.h>
74934 +#include <linux/security.h>
74935 +#include <linux/grinternal.h>
74936 +#include <linux/pid_namespace.h>
74937 +#include <linux/fdtable.h>
74938 +#include <linux/percpu.h>
74939 +
74940 +#include <asm/uaccess.h>
74941 +#include <asm/errno.h>
74942 +#include <asm/mman.h>
74943 +
74944 +static struct acl_role_db acl_role_set;
74945 +static struct name_db name_set;
74946 +static struct inodev_db inodev_set;
74947 +
74948 +/* for keeping track of userspace pointers used for subjects, so we
74949 + can share references in the kernel as well
74950 +*/
74951 +
74952 +static struct dentry *real_root;
74953 +static struct vfsmount *real_root_mnt;
74954 +
74955 +static struct acl_subj_map_db subj_map_set;
74956 +
74957 +static struct acl_role_label *default_role;
74958 +
74959 +static struct acl_role_label *role_list;
74960 +
74961 +static u16 acl_sp_role_value;
74962 +
74963 +extern char *gr_shared_page[4];
74964 +static DEFINE_MUTEX(gr_dev_mutex);
74965 +DEFINE_RWLOCK(gr_inode_lock);
74966 +
74967 +struct gr_arg *gr_usermode;
74968 +
74969 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
74970 +
74971 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
74972 +extern void gr_clear_learn_entries(void);
74973 +
74974 +#ifdef CONFIG_GRKERNSEC_RESLOG
74975 +extern void gr_log_resource(const struct task_struct *task,
74976 + const int res, const unsigned long wanted, const int gt);
74977 +#endif
74978 +
74979 +unsigned char *gr_system_salt;
74980 +unsigned char *gr_system_sum;
74981 +
74982 +static struct sprole_pw **acl_special_roles = NULL;
74983 +static __u16 num_sprole_pws = 0;
74984 +
74985 +static struct acl_role_label *kernel_role = NULL;
74986 +
74987 +static unsigned int gr_auth_attempts = 0;
74988 +static unsigned long gr_auth_expires = 0UL;
74989 +
74990 +#ifdef CONFIG_NET
74991 +extern struct vfsmount *sock_mnt;
74992 +#endif
74993 +extern struct vfsmount *pipe_mnt;
74994 +extern struct vfsmount *shm_mnt;
74995 +#ifdef CONFIG_HUGETLBFS
74996 +extern struct vfsmount *hugetlbfs_vfsmount;
74997 +#endif
74998 +
74999 +static struct acl_object_label *fakefs_obj_rw;
75000 +static struct acl_object_label *fakefs_obj_rwx;
75001 +
75002 +extern int gr_init_uidset(void);
75003 +extern void gr_free_uidset(void);
75004 +extern void gr_remove_uid(uid_t uid);
75005 +extern int gr_find_uid(uid_t uid);
75006 +
75007 +__inline__ int
75008 +gr_acl_is_enabled(void)
75009 +{
75010 + return (gr_status & GR_READY);
75011 +}
75012 +
75013 +#ifdef CONFIG_BTRFS_FS
75014 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
75015 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
75016 +#endif
75017 +
75018 +static inline dev_t __get_dev(const struct dentry *dentry)
75019 +{
75020 +#ifdef CONFIG_BTRFS_FS
75021 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
75022 + return get_btrfs_dev_from_inode(dentry->d_inode);
75023 + else
75024 +#endif
75025 + return dentry->d_inode->i_sb->s_dev;
75026 +}
75027 +
75028 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
75029 +{
75030 + return __get_dev(dentry);
75031 +}
75032 +
75033 +static char gr_task_roletype_to_char(struct task_struct *task)
75034 +{
75035 + switch (task->role->roletype &
75036 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
75037 + GR_ROLE_SPECIAL)) {
75038 + case GR_ROLE_DEFAULT:
75039 + return 'D';
75040 + case GR_ROLE_USER:
75041 + return 'U';
75042 + case GR_ROLE_GROUP:
75043 + return 'G';
75044 + case GR_ROLE_SPECIAL:
75045 + return 'S';
75046 + }
75047 +
75048 + return 'X';
75049 +}
75050 +
75051 +char gr_roletype_to_char(void)
75052 +{
75053 + return gr_task_roletype_to_char(current);
75054 +}
75055 +
75056 +__inline__ int
75057 +gr_acl_tpe_check(void)
75058 +{
75059 + if (unlikely(!(gr_status & GR_READY)))
75060 + return 0;
75061 + if (current->role->roletype & GR_ROLE_TPE)
75062 + return 1;
75063 + else
75064 + return 0;
75065 +}
75066 +
75067 +int
75068 +gr_handle_rawio(const struct inode *inode)
75069 +{
75070 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
75071 + if (inode && S_ISBLK(inode->i_mode) &&
75072 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
75073 + !capable(CAP_SYS_RAWIO))
75074 + return 1;
75075 +#endif
75076 + return 0;
75077 +}
75078 +
75079 +static int
75080 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
75081 +{
75082 + if (likely(lena != lenb))
75083 + return 0;
75084 +
75085 + return !memcmp(a, b, lena);
75086 +}
75087 +
75088 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
75089 +{
75090 + *buflen -= namelen;
75091 + if (*buflen < 0)
75092 + return -ENAMETOOLONG;
75093 + *buffer -= namelen;
75094 + memcpy(*buffer, str, namelen);
75095 + return 0;
75096 +}
75097 +
75098 +/* this must be called with vfsmount_lock and dcache_lock held */
75099 +
75100 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
75101 + struct dentry *root, struct vfsmount *rootmnt,
75102 + char *buffer, int buflen)
75103 +{
75104 + char * end = buffer+buflen;
75105 + char * retval;
75106 + int namelen;
75107 +
75108 + *--end = '\0';
75109 + buflen--;
75110 +
75111 + if (buflen < 1)
75112 + goto Elong;
75113 + /* Get '/' right */
75114 + retval = end-1;
75115 + *retval = '/';
75116 +
75117 + for (;;) {
75118 + struct dentry * parent;
75119 +
75120 + if (dentry == root && vfsmnt == rootmnt)
75121 + break;
75122 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
75123 + /* Global root? */
75124 + if (vfsmnt->mnt_parent == vfsmnt)
75125 + goto global_root;
75126 + dentry = vfsmnt->mnt_mountpoint;
75127 + vfsmnt = vfsmnt->mnt_parent;
75128 + continue;
75129 + }
75130 + parent = dentry->d_parent;
75131 + prefetch(parent);
75132 + namelen = dentry->d_name.len;
75133 + buflen -= namelen + 1;
75134 + if (buflen < 0)
75135 + goto Elong;
75136 + end -= namelen;
75137 + memcpy(end, dentry->d_name.name, namelen);
75138 + *--end = '/';
75139 + retval = end;
75140 + dentry = parent;
75141 + }
75142 +
75143 +out:
75144 + return retval;
75145 +
75146 +global_root:
75147 + namelen = dentry->d_name.len;
75148 + buflen -= namelen;
75149 + if (buflen < 0)
75150 + goto Elong;
75151 + retval -= namelen-1; /* hit the slash */
75152 + memcpy(retval, dentry->d_name.name, namelen);
75153 + goto out;
75154 +Elong:
75155 + retval = ERR_PTR(-ENAMETOOLONG);
75156 + goto out;
75157 +}
75158 +
75159 +static char *
75160 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
75161 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
75162 +{
75163 + char *retval;
75164 +
75165 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
75166 + if (unlikely(IS_ERR(retval)))
75167 + retval = strcpy(buf, "<path too long>");
75168 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
75169 + retval[1] = '\0';
75170 +
75171 + return retval;
75172 +}
75173 +
75174 +static char *
75175 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
75176 + char *buf, int buflen)
75177 +{
75178 + char *res;
75179 +
75180 + /* we can use real_root, real_root_mnt, because this is only called
75181 + by the RBAC system */
75182 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
75183 +
75184 + return res;
75185 +}
75186 +
75187 +static char *
75188 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
75189 + char *buf, int buflen)
75190 +{
75191 + char *res;
75192 + struct dentry *root;
75193 + struct vfsmount *rootmnt;
75194 + struct task_struct *reaper = &init_task;
75195 +
75196 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
75197 + read_lock(&reaper->fs->lock);
75198 + root = dget(reaper->fs->root.dentry);
75199 + rootmnt = mntget(reaper->fs->root.mnt);
75200 + read_unlock(&reaper->fs->lock);
75201 +
75202 + spin_lock(&dcache_lock);
75203 + spin_lock(&vfsmount_lock);
75204 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
75205 + spin_unlock(&vfsmount_lock);
75206 + spin_unlock(&dcache_lock);
75207 +
75208 + dput(root);
75209 + mntput(rootmnt);
75210 + return res;
75211 +}
75212 +
75213 +static char *
75214 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
75215 +{
75216 + char *ret;
75217 + spin_lock(&dcache_lock);
75218 + spin_lock(&vfsmount_lock);
75219 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
75220 + PAGE_SIZE);
75221 + spin_unlock(&vfsmount_lock);
75222 + spin_unlock(&dcache_lock);
75223 + return ret;
75224 +}
75225 +
75226 +static char *
75227 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
75228 +{
75229 + char *ret;
75230 + char *buf;
75231 + int buflen;
75232 +
75233 + spin_lock(&dcache_lock);
75234 + spin_lock(&vfsmount_lock);
75235 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
75236 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
75237 + buflen = (int)(ret - buf);
75238 + if (buflen >= 5)
75239 + prepend(&ret, &buflen, "/proc", 5);
75240 + else
75241 + ret = strcpy(buf, "<path too long>");
75242 + spin_unlock(&vfsmount_lock);
75243 + spin_unlock(&dcache_lock);
75244 + return ret;
75245 +}
75246 +
75247 +char *
75248 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
75249 +{
75250 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
75251 + PAGE_SIZE);
75252 +}
75253 +
75254 +char *
75255 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
75256 +{
75257 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
75258 + PAGE_SIZE);
75259 +}
75260 +
75261 +char *
75262 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
75263 +{
75264 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
75265 + PAGE_SIZE);
75266 +}
75267 +
75268 +char *
75269 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
75270 +{
75271 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
75272 + PAGE_SIZE);
75273 +}
75274 +
75275 +char *
75276 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
75277 +{
75278 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
75279 + PAGE_SIZE);
75280 +}
75281 +
75282 +__inline__ __u32
75283 +to_gr_audit(const __u32 reqmode)
75284 +{
75285 + /* masks off auditable permission flags, then shifts them to create
75286 + auditing flags, and adds the special case of append auditing if
75287 + we're requesting write */
75288 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
75289 +}
75290 +
75291 +struct acl_subject_label *
75292 +lookup_subject_map(const struct acl_subject_label *userp)
75293 +{
75294 + unsigned int index = shash(userp, subj_map_set.s_size);
75295 + struct subject_map *match;
75296 +
75297 + match = subj_map_set.s_hash[index];
75298 +
75299 + while (match && match->user != userp)
75300 + match = match->next;
75301 +
75302 + if (match != NULL)
75303 + return match->kernel;
75304 + else
75305 + return NULL;
75306 +}
75307 +
75308 +static void
75309 +insert_subj_map_entry(struct subject_map *subjmap)
75310 +{
75311 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
75312 + struct subject_map **curr;
75313 +
75314 + subjmap->prev = NULL;
75315 +
75316 + curr = &subj_map_set.s_hash[index];
75317 + if (*curr != NULL)
75318 + (*curr)->prev = subjmap;
75319 +
75320 + subjmap->next = *curr;
75321 + *curr = subjmap;
75322 +
75323 + return;
75324 +}
75325 +
75326 +static struct acl_role_label *
75327 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
75328 + const gid_t gid)
75329 +{
75330 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
75331 + struct acl_role_label *match;
75332 + struct role_allowed_ip *ipp;
75333 + unsigned int x;
75334 + u32 curr_ip = task->signal->curr_ip;
75335 +
75336 + task->signal->saved_ip = curr_ip;
75337 +
75338 + match = acl_role_set.r_hash[index];
75339 +
75340 + while (match) {
75341 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
75342 + for (x = 0; x < match->domain_child_num; x++) {
75343 + if (match->domain_children[x] == uid)
75344 + goto found;
75345 + }
75346 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
75347 + break;
75348 + match = match->next;
75349 + }
75350 +found:
75351 + if (match == NULL) {
75352 + try_group:
75353 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
75354 + match = acl_role_set.r_hash[index];
75355 +
75356 + while (match) {
75357 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
75358 + for (x = 0; x < match->domain_child_num; x++) {
75359 + if (match->domain_children[x] == gid)
75360 + goto found2;
75361 + }
75362 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
75363 + break;
75364 + match = match->next;
75365 + }
75366 +found2:
75367 + if (match == NULL)
75368 + match = default_role;
75369 + if (match->allowed_ips == NULL)
75370 + return match;
75371 + else {
75372 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
75373 + if (likely
75374 + ((ntohl(curr_ip) & ipp->netmask) ==
75375 + (ntohl(ipp->addr) & ipp->netmask)))
75376 + return match;
75377 + }
75378 + match = default_role;
75379 + }
75380 + } else if (match->allowed_ips == NULL) {
75381 + return match;
75382 + } else {
75383 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
75384 + if (likely
75385 + ((ntohl(curr_ip) & ipp->netmask) ==
75386 + (ntohl(ipp->addr) & ipp->netmask)))
75387 + return match;
75388 + }
75389 + goto try_group;
75390 + }
75391 +
75392 + return match;
75393 +}
75394 +
75395 +struct acl_subject_label *
75396 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
75397 + const struct acl_role_label *role)
75398 +{
75399 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
75400 + struct acl_subject_label *match;
75401 +
75402 + match = role->subj_hash[index];
75403 +
75404 + while (match && (match->inode != ino || match->device != dev ||
75405 + (match->mode & GR_DELETED))) {
75406 + match = match->next;
75407 + }
75408 +
75409 + if (match && !(match->mode & GR_DELETED))
75410 + return match;
75411 + else
75412 + return NULL;
75413 +}
75414 +
75415 +struct acl_subject_label *
75416 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
75417 + const struct acl_role_label *role)
75418 +{
75419 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
75420 + struct acl_subject_label *match;
75421 +
75422 + match = role->subj_hash[index];
75423 +
75424 + while (match && (match->inode != ino || match->device != dev ||
75425 + !(match->mode & GR_DELETED))) {
75426 + match = match->next;
75427 + }
75428 +
75429 + if (match && (match->mode & GR_DELETED))
75430 + return match;
75431 + else
75432 + return NULL;
75433 +}
75434 +
75435 +static struct acl_object_label *
75436 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
75437 + const struct acl_subject_label *subj)
75438 +{
75439 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
75440 + struct acl_object_label *match;
75441 +
75442 + match = subj->obj_hash[index];
75443 +
75444 + while (match && (match->inode != ino || match->device != dev ||
75445 + (match->mode & GR_DELETED))) {
75446 + match = match->next;
75447 + }
75448 +
75449 + if (match && !(match->mode & GR_DELETED))
75450 + return match;
75451 + else
75452 + return NULL;
75453 +}
75454 +
75455 +static struct acl_object_label *
75456 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
75457 + const struct acl_subject_label *subj)
75458 +{
75459 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
75460 + struct acl_object_label *match;
75461 +
75462 + match = subj->obj_hash[index];
75463 +
75464 + while (match && (match->inode != ino || match->device != dev ||
75465 + !(match->mode & GR_DELETED))) {
75466 + match = match->next;
75467 + }
75468 +
75469 + if (match && (match->mode & GR_DELETED))
75470 + return match;
75471 +
75472 + match = subj->obj_hash[index];
75473 +
75474 + while (match && (match->inode != ino || match->device != dev ||
75475 + (match->mode & GR_DELETED))) {
75476 + match = match->next;
75477 + }
75478 +
75479 + if (match && !(match->mode & GR_DELETED))
75480 + return match;
75481 + else
75482 + return NULL;
75483 +}
75484 +
75485 +static struct name_entry *
75486 +lookup_name_entry(const char *name)
75487 +{
75488 + unsigned int len = strlen(name);
75489 + unsigned int key = full_name_hash(name, len);
75490 + unsigned int index = key % name_set.n_size;
75491 + struct name_entry *match;
75492 +
75493 + match = name_set.n_hash[index];
75494 +
75495 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
75496 + match = match->next;
75497 +
75498 + return match;
75499 +}
75500 +
75501 +static struct name_entry *
75502 +lookup_name_entry_create(const char *name)
75503 +{
75504 + unsigned int len = strlen(name);
75505 + unsigned int key = full_name_hash(name, len);
75506 + unsigned int index = key % name_set.n_size;
75507 + struct name_entry *match;
75508 +
75509 + match = name_set.n_hash[index];
75510 +
75511 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
75512 + !match->deleted))
75513 + match = match->next;
75514 +
75515 + if (match && match->deleted)
75516 + return match;
75517 +
75518 + match = name_set.n_hash[index];
75519 +
75520 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
75521 + match->deleted))
75522 + match = match->next;
75523 +
75524 + if (match && !match->deleted)
75525 + return match;
75526 + else
75527 + return NULL;
75528 +}
75529 +
75530 +static struct inodev_entry *
75531 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
75532 +{
75533 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
75534 + struct inodev_entry *match;
75535 +
75536 + match = inodev_set.i_hash[index];
75537 +
75538 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
75539 + match = match->next;
75540 +
75541 + return match;
75542 +}
75543 +
75544 +static void
75545 +insert_inodev_entry(struct inodev_entry *entry)
75546 +{
75547 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
75548 + inodev_set.i_size);
75549 + struct inodev_entry **curr;
75550 +
75551 + entry->prev = NULL;
75552 +
75553 + curr = &inodev_set.i_hash[index];
75554 + if (*curr != NULL)
75555 + (*curr)->prev = entry;
75556 +
75557 + entry->next = *curr;
75558 + *curr = entry;
75559 +
75560 + return;
75561 +}
75562 +
75563 +static void
75564 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
75565 +{
75566 + unsigned int index =
75567 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
75568 + struct acl_role_label **curr;
75569 + struct acl_role_label *tmp;
75570 +
75571 + curr = &acl_role_set.r_hash[index];
75572 +
75573 + /* if role was already inserted due to domains and already has
75574 + a role in the same bucket as it attached, then we need to
75575 + combine these two buckets
75576 + */
75577 + if (role->next) {
75578 + tmp = role->next;
75579 + while (tmp->next)
75580 + tmp = tmp->next;
75581 + tmp->next = *curr;
75582 + } else
75583 + role->next = *curr;
75584 + *curr = role;
75585 +
75586 + return;
75587 +}
75588 +
75589 +static void
75590 +insert_acl_role_label(struct acl_role_label *role)
75591 +{
75592 + int i;
75593 +
75594 + if (role_list == NULL) {
75595 + role_list = role;
75596 + role->prev = NULL;
75597 + } else {
75598 + role->prev = role_list;
75599 + role_list = role;
75600 + }
75601 +
75602 + /* used for hash chains */
75603 + role->next = NULL;
75604 +
75605 + if (role->roletype & GR_ROLE_DOMAIN) {
75606 + for (i = 0; i < role->domain_child_num; i++)
75607 + __insert_acl_role_label(role, role->domain_children[i]);
75608 + } else
75609 + __insert_acl_role_label(role, role->uidgid);
75610 +}
75611 +
75612 +static int
75613 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
75614 +{
75615 + struct name_entry **curr, *nentry;
75616 + struct inodev_entry *ientry;
75617 + unsigned int len = strlen(name);
75618 + unsigned int key = full_name_hash(name, len);
75619 + unsigned int index = key % name_set.n_size;
75620 +
75621 + curr = &name_set.n_hash[index];
75622 +
75623 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
75624 + curr = &((*curr)->next);
75625 +
75626 + if (*curr != NULL)
75627 + return 1;
75628 +
75629 + nentry = acl_alloc(sizeof (struct name_entry));
75630 + if (nentry == NULL)
75631 + return 0;
75632 + ientry = acl_alloc(sizeof (struct inodev_entry));
75633 + if (ientry == NULL)
75634 + return 0;
75635 + ientry->nentry = nentry;
75636 +
75637 + nentry->key = key;
75638 + nentry->name = name;
75639 + nentry->inode = inode;
75640 + nentry->device = device;
75641 + nentry->len = len;
75642 + nentry->deleted = deleted;
75643 +
75644 + nentry->prev = NULL;
75645 + curr = &name_set.n_hash[index];
75646 + if (*curr != NULL)
75647 + (*curr)->prev = nentry;
75648 + nentry->next = *curr;
75649 + *curr = nentry;
75650 +
75651 + /* insert us into the table searchable by inode/dev */
75652 + insert_inodev_entry(ientry);
75653 +
75654 + return 1;
75655 +}
75656 +
75657 +static void
75658 +insert_acl_obj_label(struct acl_object_label *obj,
75659 + struct acl_subject_label *subj)
75660 +{
75661 + unsigned int index =
75662 + fhash(obj->inode, obj->device, subj->obj_hash_size);
75663 + struct acl_object_label **curr;
75664 +
75665 +
75666 + obj->prev = NULL;
75667 +
75668 + curr = &subj->obj_hash[index];
75669 + if (*curr != NULL)
75670 + (*curr)->prev = obj;
75671 +
75672 + obj->next = *curr;
75673 + *curr = obj;
75674 +
75675 + return;
75676 +}
75677 +
75678 +static void
75679 +insert_acl_subj_label(struct acl_subject_label *obj,
75680 + struct acl_role_label *role)
75681 +{
75682 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
75683 + struct acl_subject_label **curr;
75684 +
75685 + obj->prev = NULL;
75686 +
75687 + curr = &role->subj_hash[index];
75688 + if (*curr != NULL)
75689 + (*curr)->prev = obj;
75690 +
75691 + obj->next = *curr;
75692 + *curr = obj;
75693 +
75694 + return;
75695 +}
75696 +
75697 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
75698 +
75699 +static void *
75700 +create_table(__u32 * len, int elementsize)
75701 +{
75702 + unsigned int table_sizes[] = {
75703 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
75704 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
75705 + 4194301, 8388593, 16777213, 33554393, 67108859
75706 + };
75707 + void *newtable = NULL;
75708 + unsigned int pwr = 0;
75709 +
75710 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
75711 + table_sizes[pwr] <= *len)
75712 + pwr++;
75713 +
75714 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
75715 + return newtable;
75716 +
75717 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
75718 + newtable =
75719 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
75720 + else
75721 + newtable = vmalloc(table_sizes[pwr] * elementsize);
75722 +
75723 + *len = table_sizes[pwr];
75724 +
75725 + return newtable;
75726 +}
75727 +
75728 +static int
75729 +init_variables(const struct gr_arg *arg)
75730 +{
75731 + struct task_struct *reaper = &init_task;
75732 + unsigned int stacksize;
75733 +
75734 + subj_map_set.s_size = arg->role_db.num_subjects;
75735 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
75736 + name_set.n_size = arg->role_db.num_objects;
75737 + inodev_set.i_size = arg->role_db.num_objects;
75738 +
75739 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
75740 + !name_set.n_size || !inodev_set.i_size)
75741 + return 1;
75742 +
75743 + if (!gr_init_uidset())
75744 + return 1;
75745 +
75746 + /* set up the stack that holds allocation info */
75747 +
75748 + stacksize = arg->role_db.num_pointers + 5;
75749 +
75750 + if (!acl_alloc_stack_init(stacksize))
75751 + return 1;
75752 +
75753 + /* grab reference for the real root dentry and vfsmount */
75754 + read_lock(&reaper->fs->lock);
75755 + real_root = dget(reaper->fs->root.dentry);
75756 + real_root_mnt = mntget(reaper->fs->root.mnt);
75757 + read_unlock(&reaper->fs->lock);
75758 +
75759 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75760 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
75761 +#endif
75762 +
75763 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
75764 + if (fakefs_obj_rw == NULL)
75765 + return 1;
75766 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
75767 +
75768 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
75769 + if (fakefs_obj_rwx == NULL)
75770 + return 1;
75771 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
75772 +
75773 + subj_map_set.s_hash =
75774 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
75775 + acl_role_set.r_hash =
75776 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
75777 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
75778 + inodev_set.i_hash =
75779 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
75780 +
75781 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
75782 + !name_set.n_hash || !inodev_set.i_hash)
75783 + return 1;
75784 +
75785 + memset(subj_map_set.s_hash, 0,
75786 + sizeof(struct subject_map *) * subj_map_set.s_size);
75787 + memset(acl_role_set.r_hash, 0,
75788 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
75789 + memset(name_set.n_hash, 0,
75790 + sizeof (struct name_entry *) * name_set.n_size);
75791 + memset(inodev_set.i_hash, 0,
75792 + sizeof (struct inodev_entry *) * inodev_set.i_size);
75793 +
75794 + return 0;
75795 +}
75796 +
75797 +/* free information not needed after startup
75798 + currently contains user->kernel pointer mappings for subjects
75799 +*/
75800 +
75801 +static void
75802 +free_init_variables(void)
75803 +{
75804 + __u32 i;
75805 +
75806 + if (subj_map_set.s_hash) {
75807 + for (i = 0; i < subj_map_set.s_size; i++) {
75808 + if (subj_map_set.s_hash[i]) {
75809 + kfree(subj_map_set.s_hash[i]);
75810 + subj_map_set.s_hash[i] = NULL;
75811 + }
75812 + }
75813 +
75814 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
75815 + PAGE_SIZE)
75816 + kfree(subj_map_set.s_hash);
75817 + else
75818 + vfree(subj_map_set.s_hash);
75819 + }
75820 +
75821 + return;
75822 +}
75823 +
75824 +static void
75825 +free_variables(void)
75826 +{
75827 + struct acl_subject_label *s;
75828 + struct acl_role_label *r;
75829 + struct task_struct *task, *task2;
75830 + unsigned int x;
75831 +
75832 + gr_clear_learn_entries();
75833 +
75834 + read_lock(&tasklist_lock);
75835 + do_each_thread(task2, task) {
75836 + task->acl_sp_role = 0;
75837 + task->acl_role_id = 0;
75838 + task->acl = NULL;
75839 + task->role = NULL;
75840 + } while_each_thread(task2, task);
75841 + read_unlock(&tasklist_lock);
75842 +
75843 + /* release the reference to the real root dentry and vfsmount */
75844 + if (real_root)
75845 + dput(real_root);
75846 + real_root = NULL;
75847 + if (real_root_mnt)
75848 + mntput(real_root_mnt);
75849 + real_root_mnt = NULL;
75850 +
75851 + /* free all object hash tables */
75852 +
75853 + FOR_EACH_ROLE_START(r)
75854 + if (r->subj_hash == NULL)
75855 + goto next_role;
75856 + FOR_EACH_SUBJECT_START(r, s, x)
75857 + if (s->obj_hash == NULL)
75858 + break;
75859 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75860 + kfree(s->obj_hash);
75861 + else
75862 + vfree(s->obj_hash);
75863 + FOR_EACH_SUBJECT_END(s, x)
75864 + FOR_EACH_NESTED_SUBJECT_START(r, s)
75865 + if (s->obj_hash == NULL)
75866 + break;
75867 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75868 + kfree(s->obj_hash);
75869 + else
75870 + vfree(s->obj_hash);
75871 + FOR_EACH_NESTED_SUBJECT_END(s)
75872 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
75873 + kfree(r->subj_hash);
75874 + else
75875 + vfree(r->subj_hash);
75876 + r->subj_hash = NULL;
75877 +next_role:
75878 + FOR_EACH_ROLE_END(r)
75879 +
75880 + acl_free_all();
75881 +
75882 + if (acl_role_set.r_hash) {
75883 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
75884 + PAGE_SIZE)
75885 + kfree(acl_role_set.r_hash);
75886 + else
75887 + vfree(acl_role_set.r_hash);
75888 + }
75889 + if (name_set.n_hash) {
75890 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
75891 + PAGE_SIZE)
75892 + kfree(name_set.n_hash);
75893 + else
75894 + vfree(name_set.n_hash);
75895 + }
75896 +
75897 + if (inodev_set.i_hash) {
75898 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
75899 + PAGE_SIZE)
75900 + kfree(inodev_set.i_hash);
75901 + else
75902 + vfree(inodev_set.i_hash);
75903 + }
75904 +
75905 + gr_free_uidset();
75906 +
75907 + memset(&name_set, 0, sizeof (struct name_db));
75908 + memset(&inodev_set, 0, sizeof (struct inodev_db));
75909 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
75910 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
75911 +
75912 + default_role = NULL;
75913 + role_list = NULL;
75914 +
75915 + return;
75916 +}
75917 +
75918 +static __u32
75919 +count_user_objs(struct acl_object_label *userp)
75920 +{
75921 + struct acl_object_label o_tmp;
75922 + __u32 num = 0;
75923 +
75924 + while (userp) {
75925 + if (copy_from_user(&o_tmp, userp,
75926 + sizeof (struct acl_object_label)))
75927 + break;
75928 +
75929 + userp = o_tmp.prev;
75930 + num++;
75931 + }
75932 +
75933 + return num;
75934 +}
75935 +
75936 +static struct acl_subject_label *
75937 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
75938 +
75939 +static int
75940 +copy_user_glob(struct acl_object_label *obj)
75941 +{
75942 + struct acl_object_label *g_tmp, **guser;
75943 + unsigned int len;
75944 + char *tmp;
75945 +
75946 + if (obj->globbed == NULL)
75947 + return 0;
75948 +
75949 + guser = &obj->globbed;
75950 + while (*guser) {
75951 + g_tmp = (struct acl_object_label *)
75952 + acl_alloc(sizeof (struct acl_object_label));
75953 + if (g_tmp == NULL)
75954 + return -ENOMEM;
75955 +
75956 + if (copy_from_user(g_tmp, *guser,
75957 + sizeof (struct acl_object_label)))
75958 + return -EFAULT;
75959 +
75960 + len = strnlen_user(g_tmp->filename, PATH_MAX);
75961 +
75962 + if (!len || len >= PATH_MAX)
75963 + return -EINVAL;
75964 +
75965 + if ((tmp = (char *) acl_alloc(len)) == NULL)
75966 + return -ENOMEM;
75967 +
75968 + if (copy_from_user(tmp, g_tmp->filename, len))
75969 + return -EFAULT;
75970 + tmp[len-1] = '\0';
75971 + g_tmp->filename = tmp;
75972 +
75973 + *guser = g_tmp;
75974 + guser = &(g_tmp->next);
75975 + }
75976 +
75977 + return 0;
75978 +}
75979 +
75980 +static int
75981 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
75982 + struct acl_role_label *role)
75983 +{
75984 + struct acl_object_label *o_tmp;
75985 + unsigned int len;
75986 + int ret;
75987 + char *tmp;
75988 +
75989 + while (userp) {
75990 + if ((o_tmp = (struct acl_object_label *)
75991 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
75992 + return -ENOMEM;
75993 +
75994 + if (copy_from_user(o_tmp, userp,
75995 + sizeof (struct acl_object_label)))
75996 + return -EFAULT;
75997 +
75998 + userp = o_tmp->prev;
75999 +
76000 + len = strnlen_user(o_tmp->filename, PATH_MAX);
76001 +
76002 + if (!len || len >= PATH_MAX)
76003 + return -EINVAL;
76004 +
76005 + if ((tmp = (char *) acl_alloc(len)) == NULL)
76006 + return -ENOMEM;
76007 +
76008 + if (copy_from_user(tmp, o_tmp->filename, len))
76009 + return -EFAULT;
76010 + tmp[len-1] = '\0';
76011 + o_tmp->filename = tmp;
76012 +
76013 + insert_acl_obj_label(o_tmp, subj);
76014 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
76015 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
76016 + return -ENOMEM;
76017 +
76018 + ret = copy_user_glob(o_tmp);
76019 + if (ret)
76020 + return ret;
76021 +
76022 + if (o_tmp->nested) {
76023 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
76024 + if (IS_ERR(o_tmp->nested))
76025 + return PTR_ERR(o_tmp->nested);
76026 +
76027 + /* insert into nested subject list */
76028 + o_tmp->nested->next = role->hash->first;
76029 + role->hash->first = o_tmp->nested;
76030 + }
76031 + }
76032 +
76033 + return 0;
76034 +}
76035 +
76036 +static __u32
76037 +count_user_subjs(struct acl_subject_label *userp)
76038 +{
76039 + struct acl_subject_label s_tmp;
76040 + __u32 num = 0;
76041 +
76042 + while (userp) {
76043 + if (copy_from_user(&s_tmp, userp,
76044 + sizeof (struct acl_subject_label)))
76045 + break;
76046 +
76047 + userp = s_tmp.prev;
76048 + /* do not count nested subjects against this count, since
76049 + they are not included in the hash table, but are
76050 + attached to objects. We have already counted
76051 + the subjects in userspace for the allocation
76052 + stack
76053 + */
76054 + if (!(s_tmp.mode & GR_NESTED))
76055 + num++;
76056 + }
76057 +
76058 + return num;
76059 +}
76060 +
76061 +static int
76062 +copy_user_allowedips(struct acl_role_label *rolep)
76063 +{
76064 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
76065 +
76066 + ruserip = rolep->allowed_ips;
76067 +
76068 + while (ruserip) {
76069 + rlast = rtmp;
76070 +
76071 + if ((rtmp = (struct role_allowed_ip *)
76072 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
76073 + return -ENOMEM;
76074 +
76075 + if (copy_from_user(rtmp, ruserip,
76076 + sizeof (struct role_allowed_ip)))
76077 + return -EFAULT;
76078 +
76079 + ruserip = rtmp->prev;
76080 +
76081 + if (!rlast) {
76082 + rtmp->prev = NULL;
76083 + rolep->allowed_ips = rtmp;
76084 + } else {
76085 + rlast->next = rtmp;
76086 + rtmp->prev = rlast;
76087 + }
76088 +
76089 + if (!ruserip)
76090 + rtmp->next = NULL;
76091 + }
76092 +
76093 + return 0;
76094 +}
76095 +
76096 +static int
76097 +copy_user_transitions(struct acl_role_label *rolep)
76098 +{
76099 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
76100 +
76101 + unsigned int len;
76102 + char *tmp;
76103 +
76104 + rusertp = rolep->transitions;
76105 +
76106 + while (rusertp) {
76107 + rlast = rtmp;
76108 +
76109 + if ((rtmp = (struct role_transition *)
76110 + acl_alloc(sizeof (struct role_transition))) == NULL)
76111 + return -ENOMEM;
76112 +
76113 + if (copy_from_user(rtmp, rusertp,
76114 + sizeof (struct role_transition)))
76115 + return -EFAULT;
76116 +
76117 + rusertp = rtmp->prev;
76118 +
76119 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
76120 +
76121 + if (!len || len >= GR_SPROLE_LEN)
76122 + return -EINVAL;
76123 +
76124 + if ((tmp = (char *) acl_alloc(len)) == NULL)
76125 + return -ENOMEM;
76126 +
76127 + if (copy_from_user(tmp, rtmp->rolename, len))
76128 + return -EFAULT;
76129 + tmp[len-1] = '\0';
76130 + rtmp->rolename = tmp;
76131 +
76132 + if (!rlast) {
76133 + rtmp->prev = NULL;
76134 + rolep->transitions = rtmp;
76135 + } else {
76136 + rlast->next = rtmp;
76137 + rtmp->prev = rlast;
76138 + }
76139 +
76140 + if (!rusertp)
76141 + rtmp->next = NULL;
76142 + }
76143 +
76144 + return 0;
76145 +}
76146 +
76147 +static struct acl_subject_label *
76148 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
76149 +{
76150 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
76151 + unsigned int len;
76152 + char *tmp;
76153 + __u32 num_objs;
76154 + struct acl_ip_label **i_tmp, *i_utmp2;
76155 + struct gr_hash_struct ghash;
76156 + struct subject_map *subjmap;
76157 + unsigned int i_num;
76158 + int err;
76159 +
76160 + s_tmp = lookup_subject_map(userp);
76161 +
76162 + /* we've already copied this subject into the kernel, just return
76163 + the reference to it, and don't copy it over again
76164 + */
76165 + if (s_tmp)
76166 + return(s_tmp);
76167 +
76168 + if ((s_tmp = (struct acl_subject_label *)
76169 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
76170 + return ERR_PTR(-ENOMEM);
76171 +
76172 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
76173 + if (subjmap == NULL)
76174 + return ERR_PTR(-ENOMEM);
76175 +
76176 + subjmap->user = userp;
76177 + subjmap->kernel = s_tmp;
76178 + insert_subj_map_entry(subjmap);
76179 +
76180 + if (copy_from_user(s_tmp, userp,
76181 + sizeof (struct acl_subject_label)))
76182 + return ERR_PTR(-EFAULT);
76183 +
76184 + len = strnlen_user(s_tmp->filename, PATH_MAX);
76185 +
76186 + if (!len || len >= PATH_MAX)
76187 + return ERR_PTR(-EINVAL);
76188 +
76189 + if ((tmp = (char *) acl_alloc(len)) == NULL)
76190 + return ERR_PTR(-ENOMEM);
76191 +
76192 + if (copy_from_user(tmp, s_tmp->filename, len))
76193 + return ERR_PTR(-EFAULT);
76194 + tmp[len-1] = '\0';
76195 + s_tmp->filename = tmp;
76196 +
76197 + if (!strcmp(s_tmp->filename, "/"))
76198 + role->root_label = s_tmp;
76199 +
76200 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
76201 + return ERR_PTR(-EFAULT);
76202 +
76203 + /* copy user and group transition tables */
76204 +
76205 + if (s_tmp->user_trans_num) {
76206 + uid_t *uidlist;
76207 +
76208 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
76209 + if (uidlist == NULL)
76210 + return ERR_PTR(-ENOMEM);
76211 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
76212 + return ERR_PTR(-EFAULT);
76213 +
76214 + s_tmp->user_transitions = uidlist;
76215 + }
76216 +
76217 + if (s_tmp->group_trans_num) {
76218 + gid_t *gidlist;
76219 +
76220 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
76221 + if (gidlist == NULL)
76222 + return ERR_PTR(-ENOMEM);
76223 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
76224 + return ERR_PTR(-EFAULT);
76225 +
76226 + s_tmp->group_transitions = gidlist;
76227 + }
76228 +
76229 + /* set up object hash table */
76230 + num_objs = count_user_objs(ghash.first);
76231 +
76232 + s_tmp->obj_hash_size = num_objs;
76233 + s_tmp->obj_hash =
76234 + (struct acl_object_label **)
76235 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
76236 +
76237 + if (!s_tmp->obj_hash)
76238 + return ERR_PTR(-ENOMEM);
76239 +
76240 + memset(s_tmp->obj_hash, 0,
76241 + s_tmp->obj_hash_size *
76242 + sizeof (struct acl_object_label *));
76243 +
76244 + /* add in objects */
76245 + err = copy_user_objs(ghash.first, s_tmp, role);
76246 +
76247 + if (err)
76248 + return ERR_PTR(err);
76249 +
76250 + /* set pointer for parent subject */
76251 + if (s_tmp->parent_subject) {
76252 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
76253 +
76254 + if (IS_ERR(s_tmp2))
76255 + return s_tmp2;
76256 +
76257 + s_tmp->parent_subject = s_tmp2;
76258 + }
76259 +
76260 + /* add in ip acls */
76261 +
76262 + if (!s_tmp->ip_num) {
76263 + s_tmp->ips = NULL;
76264 + goto insert;
76265 + }
76266 +
76267 + i_tmp =
76268 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
76269 + sizeof (struct acl_ip_label *));
76270 +
76271 + if (!i_tmp)
76272 + return ERR_PTR(-ENOMEM);
76273 +
76274 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
76275 + *(i_tmp + i_num) =
76276 + (struct acl_ip_label *)
76277 + acl_alloc(sizeof (struct acl_ip_label));
76278 + if (!*(i_tmp + i_num))
76279 + return ERR_PTR(-ENOMEM);
76280 +
76281 + if (copy_from_user
76282 + (&i_utmp2, s_tmp->ips + i_num,
76283 + sizeof (struct acl_ip_label *)))
76284 + return ERR_PTR(-EFAULT);
76285 +
76286 + if (copy_from_user
76287 + (*(i_tmp + i_num), i_utmp2,
76288 + sizeof (struct acl_ip_label)))
76289 + return ERR_PTR(-EFAULT);
76290 +
76291 + if ((*(i_tmp + i_num))->iface == NULL)
76292 + continue;
76293 +
76294 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
76295 + if (!len || len >= IFNAMSIZ)
76296 + return ERR_PTR(-EINVAL);
76297 + tmp = acl_alloc(len);
76298 + if (tmp == NULL)
76299 + return ERR_PTR(-ENOMEM);
76300 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
76301 + return ERR_PTR(-EFAULT);
76302 + (*(i_tmp + i_num))->iface = tmp;
76303 + }
76304 +
76305 + s_tmp->ips = i_tmp;
76306 +
76307 +insert:
76308 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
76309 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
76310 + return ERR_PTR(-ENOMEM);
76311 +
76312 + return s_tmp;
76313 +}
76314 +
76315 +static int
76316 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
76317 +{
76318 + struct acl_subject_label s_pre;
76319 + struct acl_subject_label * ret;
76320 + int err;
76321 +
76322 + while (userp) {
76323 + if (copy_from_user(&s_pre, userp,
76324 + sizeof (struct acl_subject_label)))
76325 + return -EFAULT;
76326 +
76327 + /* do not add nested subjects here, add
76328 + while parsing objects
76329 + */
76330 +
76331 + if (s_pre.mode & GR_NESTED) {
76332 + userp = s_pre.prev;
76333 + continue;
76334 + }
76335 +
76336 + ret = do_copy_user_subj(userp, role);
76337 +
76338 + err = PTR_ERR(ret);
76339 + if (IS_ERR(ret))
76340 + return err;
76341 +
76342 + insert_acl_subj_label(ret, role);
76343 +
76344 + userp = s_pre.prev;
76345 + }
76346 +
76347 + return 0;
76348 +}
76349 +
76350 +static int
76351 +copy_user_acl(struct gr_arg *arg)
76352 +{
76353 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
76354 + struct sprole_pw *sptmp;
76355 + struct gr_hash_struct *ghash;
76356 + uid_t *domainlist;
76357 + unsigned int r_num;
76358 + unsigned int len;
76359 + char *tmp;
76360 + int err = 0;
76361 + __u16 i;
76362 + __u32 num_subjs;
76363 +
76364 + /* we need a default and kernel role */
76365 + if (arg->role_db.num_roles < 2)
76366 + return -EINVAL;
76367 +
76368 + /* copy special role authentication info from userspace */
76369 +
76370 + num_sprole_pws = arg->num_sprole_pws;
76371 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
76372 +
76373 + if (!acl_special_roles) {
76374 + err = -ENOMEM;
76375 + goto cleanup;
76376 + }
76377 +
76378 + for (i = 0; i < num_sprole_pws; i++) {
76379 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
76380 + if (!sptmp) {
76381 + err = -ENOMEM;
76382 + goto cleanup;
76383 + }
76384 + if (copy_from_user(sptmp, arg->sprole_pws + i,
76385 + sizeof (struct sprole_pw))) {
76386 + err = -EFAULT;
76387 + goto cleanup;
76388 + }
76389 +
76390 + len =
76391 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
76392 +
76393 + if (!len || len >= GR_SPROLE_LEN) {
76394 + err = -EINVAL;
76395 + goto cleanup;
76396 + }
76397 +
76398 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
76399 + err = -ENOMEM;
76400 + goto cleanup;
76401 + }
76402 +
76403 + if (copy_from_user(tmp, sptmp->rolename, len)) {
76404 + err = -EFAULT;
76405 + goto cleanup;
76406 + }
76407 + tmp[len-1] = '\0';
76408 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
76409 + printk(KERN_ALERT "Copying special role %s\n", tmp);
76410 +#endif
76411 + sptmp->rolename = tmp;
76412 + acl_special_roles[i] = sptmp;
76413 + }
76414 +
76415 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
76416 +
76417 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
76418 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
76419 +
76420 + if (!r_tmp) {
76421 + err = -ENOMEM;
76422 + goto cleanup;
76423 + }
76424 +
76425 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
76426 + sizeof (struct acl_role_label *))) {
76427 + err = -EFAULT;
76428 + goto cleanup;
76429 + }
76430 +
76431 + if (copy_from_user(r_tmp, r_utmp2,
76432 + sizeof (struct acl_role_label))) {
76433 + err = -EFAULT;
76434 + goto cleanup;
76435 + }
76436 +
76437 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
76438 +
76439 + if (!len || len >= PATH_MAX) {
76440 + err = -EINVAL;
76441 + goto cleanup;
76442 + }
76443 +
76444 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
76445 + err = -ENOMEM;
76446 + goto cleanup;
76447 + }
76448 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
76449 + err = -EFAULT;
76450 + goto cleanup;
76451 + }
76452 + tmp[len-1] = '\0';
76453 + r_tmp->rolename = tmp;
76454 +
76455 + if (!strcmp(r_tmp->rolename, "default")
76456 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
76457 + default_role = r_tmp;
76458 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
76459 + kernel_role = r_tmp;
76460 + }
76461 +
76462 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
76463 + err = -ENOMEM;
76464 + goto cleanup;
76465 + }
76466 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
76467 + err = -EFAULT;
76468 + goto cleanup;
76469 + }
76470 +
76471 + r_tmp->hash = ghash;
76472 +
76473 + num_subjs = count_user_subjs(r_tmp->hash->first);
76474 +
76475 + r_tmp->subj_hash_size = num_subjs;
76476 + r_tmp->subj_hash =
76477 + (struct acl_subject_label **)
76478 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
76479 +
76480 + if (!r_tmp->subj_hash) {
76481 + err = -ENOMEM;
76482 + goto cleanup;
76483 + }
76484 +
76485 + err = copy_user_allowedips(r_tmp);
76486 + if (err)
76487 + goto cleanup;
76488 +
76489 + /* copy domain info */
76490 + if (r_tmp->domain_children != NULL) {
76491 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
76492 + if (domainlist == NULL) {
76493 + err = -ENOMEM;
76494 + goto cleanup;
76495 + }
76496 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
76497 + err = -EFAULT;
76498 + goto cleanup;
76499 + }
76500 + r_tmp->domain_children = domainlist;
76501 + }
76502 +
76503 + err = copy_user_transitions(r_tmp);
76504 + if (err)
76505 + goto cleanup;
76506 +
76507 + memset(r_tmp->subj_hash, 0,
76508 + r_tmp->subj_hash_size *
76509 + sizeof (struct acl_subject_label *));
76510 +
76511 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
76512 +
76513 + if (err)
76514 + goto cleanup;
76515 +
76516 + /* set nested subject list to null */
76517 + r_tmp->hash->first = NULL;
76518 +
76519 + insert_acl_role_label(r_tmp);
76520 + }
76521 +
76522 + goto return_err;
76523 + cleanup:
76524 + free_variables();
76525 + return_err:
76526 + return err;
76527 +
76528 +}
76529 +
76530 +static int
76531 +gracl_init(struct gr_arg *args)
76532 +{
76533 + int error = 0;
76534 +
76535 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
76536 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
76537 +
76538 + if (init_variables(args)) {
76539 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
76540 + error = -ENOMEM;
76541 + free_variables();
76542 + goto out;
76543 + }
76544 +
76545 + error = copy_user_acl(args);
76546 + free_init_variables();
76547 + if (error) {
76548 + free_variables();
76549 + goto out;
76550 + }
76551 +
76552 + if ((error = gr_set_acls(0))) {
76553 + free_variables();
76554 + goto out;
76555 + }
76556 +
76557 + pax_open_kernel();
76558 + gr_status |= GR_READY;
76559 + pax_close_kernel();
76560 +
76561 + out:
76562 + return error;
76563 +}
76564 +
76565 +/* derived from glibc fnmatch() 0: match, 1: no match*/
76566 +
76567 +static int
76568 +glob_match(const char *p, const char *n)
76569 +{
76570 + char c;
76571 +
76572 + while ((c = *p++) != '\0') {
76573 + switch (c) {
76574 + case '?':
76575 + if (*n == '\0')
76576 + return 1;
76577 + else if (*n == '/')
76578 + return 1;
76579 + break;
76580 + case '\\':
76581 + if (*n != c)
76582 + return 1;
76583 + break;
76584 + case '*':
76585 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
76586 + if (*n == '/')
76587 + return 1;
76588 + else if (c == '?') {
76589 + if (*n == '\0')
76590 + return 1;
76591 + else
76592 + ++n;
76593 + }
76594 + }
76595 + if (c == '\0') {
76596 + return 0;
76597 + } else {
76598 + const char *endp;
76599 +
76600 + if ((endp = strchr(n, '/')) == NULL)
76601 + endp = n + strlen(n);
76602 +
76603 + if (c == '[') {
76604 + for (--p; n < endp; ++n)
76605 + if (!glob_match(p, n))
76606 + return 0;
76607 + } else if (c == '/') {
76608 + while (*n != '\0' && *n != '/')
76609 + ++n;
76610 + if (*n == '/' && !glob_match(p, n + 1))
76611 + return 0;
76612 + } else {
76613 + for (--p; n < endp; ++n)
76614 + if (*n == c && !glob_match(p, n))
76615 + return 0;
76616 + }
76617 +
76618 + return 1;
76619 + }
76620 + case '[':
76621 + {
76622 + int not;
76623 + char cold;
76624 +
76625 + if (*n == '\0' || *n == '/')
76626 + return 1;
76627 +
76628 + not = (*p == '!' || *p == '^');
76629 + if (not)
76630 + ++p;
76631 +
76632 + c = *p++;
76633 + for (;;) {
76634 + unsigned char fn = (unsigned char)*n;
76635 +
76636 + if (c == '\0')
76637 + return 1;
76638 + else {
76639 + if (c == fn)
76640 + goto matched;
76641 + cold = c;
76642 + c = *p++;
76643 +
76644 + if (c == '-' && *p != ']') {
76645 + unsigned char cend = *p++;
76646 +
76647 + if (cend == '\0')
76648 + return 1;
76649 +
76650 + if (cold <= fn && fn <= cend)
76651 + goto matched;
76652 +
76653 + c = *p++;
76654 + }
76655 + }
76656 +
76657 + if (c == ']')
76658 + break;
76659 + }
76660 + if (!not)
76661 + return 1;
76662 + break;
76663 + matched:
76664 + while (c != ']') {
76665 + if (c == '\0')
76666 + return 1;
76667 +
76668 + c = *p++;
76669 + }
76670 + if (not)
76671 + return 1;
76672 + }
76673 + break;
76674 + default:
76675 + if (c != *n)
76676 + return 1;
76677 + }
76678 +
76679 + ++n;
76680 + }
76681 +
76682 + if (*n == '\0')
76683 + return 0;
76684 +
76685 + if (*n == '/')
76686 + return 0;
76687 +
76688 + return 1;
76689 +}
76690 +
76691 +static struct acl_object_label *
76692 +chk_glob_label(struct acl_object_label *globbed,
76693 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
76694 +{
76695 + struct acl_object_label *tmp;
76696 +
76697 + if (*path == NULL)
76698 + *path = gr_to_filename_nolock(dentry, mnt);
76699 +
76700 + tmp = globbed;
76701 +
76702 + while (tmp) {
76703 + if (!glob_match(tmp->filename, *path))
76704 + return tmp;
76705 + tmp = tmp->next;
76706 + }
76707 +
76708 + return NULL;
76709 +}
76710 +
76711 +static struct acl_object_label *
76712 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
76713 + const ino_t curr_ino, const dev_t curr_dev,
76714 + const struct acl_subject_label *subj, char **path, const int checkglob)
76715 +{
76716 + struct acl_subject_label *tmpsubj;
76717 + struct acl_object_label *retval;
76718 + struct acl_object_label *retval2;
76719 +
76720 + tmpsubj = (struct acl_subject_label *) subj;
76721 + read_lock(&gr_inode_lock);
76722 + do {
76723 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
76724 + if (retval) {
76725 + if (checkglob && retval->globbed) {
76726 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
76727 + if (retval2)
76728 + retval = retval2;
76729 + }
76730 + break;
76731 + }
76732 + } while ((tmpsubj = tmpsubj->parent_subject));
76733 + read_unlock(&gr_inode_lock);
76734 +
76735 + return retval;
76736 +}
76737 +
76738 +static __inline__ struct acl_object_label *
76739 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
76740 + const struct dentry *curr_dentry,
76741 + const struct acl_subject_label *subj, char **path, const int checkglob)
76742 +{
76743 + int newglob = checkglob;
76744 +
76745 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
76746 + as we don't want a / * rule to match instead of the / object
76747 + don't do this for create lookups that call this function though, since they're looking up
76748 + on the parent and thus need globbing checks on all paths
76749 + */
76750 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
76751 + newglob = GR_NO_GLOB;
76752 +
76753 + return __full_lookup(orig_dentry, orig_mnt,
76754 + curr_dentry->d_inode->i_ino,
76755 + __get_dev(curr_dentry), subj, path, newglob);
76756 +}
76757 +
76758 +static struct acl_object_label *
76759 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76760 + const struct acl_subject_label *subj, char *path, const int checkglob)
76761 +{
76762 + struct dentry *dentry = (struct dentry *) l_dentry;
76763 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
76764 + struct acl_object_label *retval;
76765 +
76766 + spin_lock(&dcache_lock);
76767 + spin_lock(&vfsmount_lock);
76768 +
76769 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
76770 +#ifdef CONFIG_NET
76771 + mnt == sock_mnt ||
76772 +#endif
76773 +#ifdef CONFIG_HUGETLBFS
76774 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
76775 +#endif
76776 + /* ignore Eric Biederman */
76777 + IS_PRIVATE(l_dentry->d_inode))) {
76778 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
76779 + goto out;
76780 + }
76781 +
76782 + for (;;) {
76783 + if (dentry == real_root && mnt == real_root_mnt)
76784 + break;
76785 +
76786 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
76787 + if (mnt->mnt_parent == mnt)
76788 + break;
76789 +
76790 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
76791 + if (retval != NULL)
76792 + goto out;
76793 +
76794 + dentry = mnt->mnt_mountpoint;
76795 + mnt = mnt->mnt_parent;
76796 + continue;
76797 + }
76798 +
76799 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
76800 + if (retval != NULL)
76801 + goto out;
76802 +
76803 + dentry = dentry->d_parent;
76804 + }
76805 +
76806 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
76807 +
76808 + if (retval == NULL)
76809 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
76810 +out:
76811 + spin_unlock(&vfsmount_lock);
76812 + spin_unlock(&dcache_lock);
76813 +
76814 + BUG_ON(retval == NULL);
76815 +
76816 + return retval;
76817 +}
76818 +
76819 +static __inline__ struct acl_object_label *
76820 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76821 + const struct acl_subject_label *subj)
76822 +{
76823 + char *path = NULL;
76824 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
76825 +}
76826 +
76827 +static __inline__ struct acl_object_label *
76828 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76829 + const struct acl_subject_label *subj)
76830 +{
76831 + char *path = NULL;
76832 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
76833 +}
76834 +
76835 +static __inline__ struct acl_object_label *
76836 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76837 + const struct acl_subject_label *subj, char *path)
76838 +{
76839 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
76840 +}
76841 +
76842 +static struct acl_subject_label *
76843 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76844 + const struct acl_role_label *role)
76845 +{
76846 + struct dentry *dentry = (struct dentry *) l_dentry;
76847 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
76848 + struct acl_subject_label *retval;
76849 +
76850 + spin_lock(&dcache_lock);
76851 + spin_lock(&vfsmount_lock);
76852 +
76853 + for (;;) {
76854 + if (dentry == real_root && mnt == real_root_mnt)
76855 + break;
76856 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
76857 + if (mnt->mnt_parent == mnt)
76858 + break;
76859 +
76860 + read_lock(&gr_inode_lock);
76861 + retval =
76862 + lookup_acl_subj_label(dentry->d_inode->i_ino,
76863 + __get_dev(dentry), role);
76864 + read_unlock(&gr_inode_lock);
76865 + if (retval != NULL)
76866 + goto out;
76867 +
76868 + dentry = mnt->mnt_mountpoint;
76869 + mnt = mnt->mnt_parent;
76870 + continue;
76871 + }
76872 +
76873 + read_lock(&gr_inode_lock);
76874 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
76875 + __get_dev(dentry), role);
76876 + read_unlock(&gr_inode_lock);
76877 + if (retval != NULL)
76878 + goto out;
76879 +
76880 + dentry = dentry->d_parent;
76881 + }
76882 +
76883 + read_lock(&gr_inode_lock);
76884 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
76885 + __get_dev(dentry), role);
76886 + read_unlock(&gr_inode_lock);
76887 +
76888 + if (unlikely(retval == NULL)) {
76889 + read_lock(&gr_inode_lock);
76890 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
76891 + __get_dev(real_root), role);
76892 + read_unlock(&gr_inode_lock);
76893 + }
76894 +out:
76895 + spin_unlock(&vfsmount_lock);
76896 + spin_unlock(&dcache_lock);
76897 +
76898 + BUG_ON(retval == NULL);
76899 +
76900 + return retval;
76901 +}
76902 +
76903 +static void
76904 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
76905 +{
76906 + struct task_struct *task = current;
76907 + const struct cred *cred = current_cred();
76908 +
76909 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
76910 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
76911 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
76912 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
76913 +
76914 + return;
76915 +}
76916 +
76917 +static void
76918 +gr_log_learn_sysctl(const char *path, const __u32 mode)
76919 +{
76920 + struct task_struct *task = current;
76921 + const struct cred *cred = current_cred();
76922 +
76923 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
76924 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
76925 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
76926 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
76927 +
76928 + return;
76929 +}
76930 +
76931 +static void
76932 +gr_log_learn_id_change(const char type, const unsigned int real,
76933 + const unsigned int effective, const unsigned int fs)
76934 +{
76935 + struct task_struct *task = current;
76936 + const struct cred *cred = current_cred();
76937 +
76938 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
76939 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
76940 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
76941 + type, real, effective, fs, &task->signal->saved_ip);
76942 +
76943 + return;
76944 +}
76945 +
76946 +__u32
76947 +gr_search_file(const struct dentry * dentry, const __u32 mode,
76948 + const struct vfsmount * mnt)
76949 +{
76950 + __u32 retval = mode;
76951 + struct acl_subject_label *curracl;
76952 + struct acl_object_label *currobj;
76953 +
76954 + if (unlikely(!(gr_status & GR_READY)))
76955 + return (mode & ~GR_AUDITS);
76956 +
76957 + curracl = current->acl;
76958 +
76959 + currobj = chk_obj_label(dentry, mnt, curracl);
76960 + retval = currobj->mode & mode;
76961 +
76962 + /* if we're opening a specified transfer file for writing
76963 + (e.g. /dev/initctl), then transfer our role to init
76964 + */
76965 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
76966 + current->role->roletype & GR_ROLE_PERSIST)) {
76967 + struct task_struct *task = init_pid_ns.child_reaper;
76968 +
76969 + if (task->role != current->role) {
76970 + task->acl_sp_role = 0;
76971 + task->acl_role_id = current->acl_role_id;
76972 + task->role = current->role;
76973 + rcu_read_lock();
76974 + read_lock(&grsec_exec_file_lock);
76975 + gr_apply_subject_to_task(task);
76976 + read_unlock(&grsec_exec_file_lock);
76977 + rcu_read_unlock();
76978 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
76979 + }
76980 + }
76981 +
76982 + if (unlikely
76983 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
76984 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
76985 + __u32 new_mode = mode;
76986 +
76987 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
76988 +
76989 + retval = new_mode;
76990 +
76991 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
76992 + new_mode |= GR_INHERIT;
76993 +
76994 + if (!(mode & GR_NOLEARN))
76995 + gr_log_learn(dentry, mnt, new_mode);
76996 + }
76997 +
76998 + return retval;
76999 +}
77000 +
77001 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
77002 + const struct dentry *parent,
77003 + const struct vfsmount *mnt)
77004 +{
77005 + struct name_entry *match;
77006 + struct acl_object_label *matchpo;
77007 + struct acl_subject_label *curracl;
77008 + char *path;
77009 +
77010 + if (unlikely(!(gr_status & GR_READY)))
77011 + return NULL;
77012 +
77013 + preempt_disable();
77014 + path = gr_to_filename_rbac(new_dentry, mnt);
77015 + match = lookup_name_entry_create(path);
77016 +
77017 + curracl = current->acl;
77018 +
77019 + if (match) {
77020 + read_lock(&gr_inode_lock);
77021 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
77022 + read_unlock(&gr_inode_lock);
77023 +
77024 + if (matchpo) {
77025 + preempt_enable();
77026 + return matchpo;
77027 + }
77028 + }
77029 +
77030 + // lookup parent
77031 +
77032 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
77033 +
77034 + preempt_enable();
77035 + return matchpo;
77036 +}
77037 +
77038 +__u32
77039 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
77040 + const struct vfsmount * mnt, const __u32 mode)
77041 +{
77042 + struct acl_object_label *matchpo;
77043 + __u32 retval;
77044 +
77045 + if (unlikely(!(gr_status & GR_READY)))
77046 + return (mode & ~GR_AUDITS);
77047 +
77048 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
77049 +
77050 + retval = matchpo->mode & mode;
77051 +
77052 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
77053 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
77054 + __u32 new_mode = mode;
77055 +
77056 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
77057 +
77058 + gr_log_learn(new_dentry, mnt, new_mode);
77059 + return new_mode;
77060 + }
77061 +
77062 + return retval;
77063 +}
77064 +
77065 +__u32
77066 +gr_check_link(const struct dentry * new_dentry,
77067 + const struct dentry * parent_dentry,
77068 + const struct vfsmount * parent_mnt,
77069 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
77070 +{
77071 + struct acl_object_label *obj;
77072 + __u32 oldmode, newmode;
77073 + __u32 needmode;
77074 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
77075 + GR_DELETE | GR_INHERIT;
77076 +
77077 + if (unlikely(!(gr_status & GR_READY)))
77078 + return (GR_CREATE | GR_LINK);
77079 +
77080 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
77081 + oldmode = obj->mode;
77082 +
77083 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
77084 + newmode = obj->mode;
77085 +
77086 + needmode = newmode & checkmodes;
77087 +
77088 + // old name for hardlink must have at least the permissions of the new name
77089 + if ((oldmode & needmode) != needmode)
77090 + goto bad;
77091 +
77092 + // if old name had restrictions/auditing, make sure the new name does as well
77093 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
77094 +
77095 + // don't allow hardlinking of suid/sgid files without permission
77096 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
77097 + needmode |= GR_SETID;
77098 +
77099 + if ((newmode & needmode) != needmode)
77100 + goto bad;
77101 +
77102 + // enforce minimum permissions
77103 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
77104 + return newmode;
77105 +bad:
77106 + needmode = oldmode;
77107 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
77108 + needmode |= GR_SETID;
77109 +
77110 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
77111 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
77112 + return (GR_CREATE | GR_LINK);
77113 + } else if (newmode & GR_SUPPRESS)
77114 + return GR_SUPPRESS;
77115 + else
77116 + return 0;
77117 +}
77118 +
77119 +int
77120 +gr_check_hidden_task(const struct task_struct *task)
77121 +{
77122 + if (unlikely(!(gr_status & GR_READY)))
77123 + return 0;
77124 +
77125 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
77126 + return 1;
77127 +
77128 + return 0;
77129 +}
77130 +
77131 +int
77132 +gr_check_protected_task(const struct task_struct *task)
77133 +{
77134 + if (unlikely(!(gr_status & GR_READY) || !task))
77135 + return 0;
77136 +
77137 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
77138 + task->acl != current->acl)
77139 + return 1;
77140 +
77141 + return 0;
77142 +}
77143 +
77144 +int
77145 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
77146 +{
77147 + struct task_struct *p;
77148 + int ret = 0;
77149 +
77150 + if (unlikely(!(gr_status & GR_READY) || !pid))
77151 + return ret;
77152 +
77153 + read_lock(&tasklist_lock);
77154 + do_each_pid_task(pid, type, p) {
77155 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
77156 + p->acl != current->acl) {
77157 + ret = 1;
77158 + goto out;
77159 + }
77160 + } while_each_pid_task(pid, type, p);
77161 +out:
77162 + read_unlock(&tasklist_lock);
77163 +
77164 + return ret;
77165 +}
77166 +
77167 +void
77168 +gr_copy_label(struct task_struct *tsk)
77169 +{
77170 + /* plain copying of fields is already done by dup_task_struct */
77171 + tsk->signal->used_accept = 0;
77172 + tsk->acl_sp_role = 0;
77173 + //tsk->acl_role_id = current->acl_role_id;
77174 + //tsk->acl = current->acl;
77175 + //tsk->role = current->role;
77176 + tsk->signal->curr_ip = current->signal->curr_ip;
77177 + tsk->signal->saved_ip = current->signal->saved_ip;
77178 + if (current->exec_file)
77179 + get_file(current->exec_file);
77180 + //tsk->exec_file = current->exec_file;
77181 + //tsk->is_writable = current->is_writable;
77182 + if (unlikely(current->signal->used_accept)) {
77183 + current->signal->curr_ip = 0;
77184 + current->signal->saved_ip = 0;
77185 + }
77186 +
77187 + return;
77188 +}
77189 +
77190 +static void
77191 +gr_set_proc_res(struct task_struct *task)
77192 +{
77193 + struct acl_subject_label *proc;
77194 + unsigned short i;
77195 +
77196 + proc = task->acl;
77197 +
77198 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
77199 + return;
77200 +
77201 + for (i = 0; i < RLIM_NLIMITS; i++) {
77202 + if (!(proc->resmask & (1 << i)))
77203 + continue;
77204 +
77205 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
77206 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
77207 + }
77208 +
77209 + return;
77210 +}
77211 +
77212 +extern int __gr_process_user_ban(struct user_struct *user);
77213 +
77214 +int
77215 +gr_check_user_change(int real, int effective, int fs)
77216 +{
77217 + unsigned int i;
77218 + __u16 num;
77219 + uid_t *uidlist;
77220 + int curuid;
77221 + int realok = 0;
77222 + int effectiveok = 0;
77223 + int fsok = 0;
77224 +
77225 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
77226 + struct user_struct *user;
77227 +
77228 + if (real == -1)
77229 + goto skipit;
77230 +
77231 + user = find_user(real);
77232 + if (user == NULL)
77233 + goto skipit;
77234 +
77235 + if (__gr_process_user_ban(user)) {
77236 + /* for find_user */
77237 + free_uid(user);
77238 + return 1;
77239 + }
77240 +
77241 + /* for find_user */
77242 + free_uid(user);
77243 +
77244 +skipit:
77245 +#endif
77246 +
77247 + if (unlikely(!(gr_status & GR_READY)))
77248 + return 0;
77249 +
77250 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
77251 + gr_log_learn_id_change('u', real, effective, fs);
77252 +
77253 + num = current->acl->user_trans_num;
77254 + uidlist = current->acl->user_transitions;
77255 +
77256 + if (uidlist == NULL)
77257 + return 0;
77258 +
77259 + if (real == -1)
77260 + realok = 1;
77261 + if (effective == -1)
77262 + effectiveok = 1;
77263 + if (fs == -1)
77264 + fsok = 1;
77265 +
77266 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
77267 + for (i = 0; i < num; i++) {
77268 + curuid = (int)uidlist[i];
77269 + if (real == curuid)
77270 + realok = 1;
77271 + if (effective == curuid)
77272 + effectiveok = 1;
77273 + if (fs == curuid)
77274 + fsok = 1;
77275 + }
77276 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
77277 + for (i = 0; i < num; i++) {
77278 + curuid = (int)uidlist[i];
77279 + if (real == curuid)
77280 + break;
77281 + if (effective == curuid)
77282 + break;
77283 + if (fs == curuid)
77284 + break;
77285 + }
77286 + /* not in deny list */
77287 + if (i == num) {
77288 + realok = 1;
77289 + effectiveok = 1;
77290 + fsok = 1;
77291 + }
77292 + }
77293 +
77294 + if (realok && effectiveok && fsok)
77295 + return 0;
77296 + else {
77297 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
77298 + return 1;
77299 + }
77300 +}
77301 +
77302 +int
77303 +gr_check_group_change(int real, int effective, int fs)
77304 +{
77305 + unsigned int i;
77306 + __u16 num;
77307 + gid_t *gidlist;
77308 + int curgid;
77309 + int realok = 0;
77310 + int effectiveok = 0;
77311 + int fsok = 0;
77312 +
77313 + if (unlikely(!(gr_status & GR_READY)))
77314 + return 0;
77315 +
77316 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
77317 + gr_log_learn_id_change('g', real, effective, fs);
77318 +
77319 + num = current->acl->group_trans_num;
77320 + gidlist = current->acl->group_transitions;
77321 +
77322 + if (gidlist == NULL)
77323 + return 0;
77324 +
77325 + if (real == -1)
77326 + realok = 1;
77327 + if (effective == -1)
77328 + effectiveok = 1;
77329 + if (fs == -1)
77330 + fsok = 1;
77331 +
77332 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
77333 + for (i = 0; i < num; i++) {
77334 + curgid = (int)gidlist[i];
77335 + if (real == curgid)
77336 + realok = 1;
77337 + if (effective == curgid)
77338 + effectiveok = 1;
77339 + if (fs == curgid)
77340 + fsok = 1;
77341 + }
77342 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
77343 + for (i = 0; i < num; i++) {
77344 + curgid = (int)gidlist[i];
77345 + if (real == curgid)
77346 + break;
77347 + if (effective == curgid)
77348 + break;
77349 + if (fs == curgid)
77350 + break;
77351 + }
77352 + /* not in deny list */
77353 + if (i == num) {
77354 + realok = 1;
77355 + effectiveok = 1;
77356 + fsok = 1;
77357 + }
77358 + }
77359 +
77360 + if (realok && effectiveok && fsok)
77361 + return 0;
77362 + else {
77363 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
77364 + return 1;
77365 + }
77366 +}
77367 +
77368 +extern int gr_acl_is_capable(const int cap);
77369 +
77370 +void
77371 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
77372 +{
77373 + struct acl_role_label *role = task->role;
77374 + struct acl_subject_label *subj = NULL;
77375 + struct acl_object_label *obj;
77376 + struct file *filp;
77377 +
77378 + if (unlikely(!(gr_status & GR_READY)))
77379 + return;
77380 +
77381 + filp = task->exec_file;
77382 +
77383 + /* kernel process, we'll give them the kernel role */
77384 + if (unlikely(!filp)) {
77385 + task->role = kernel_role;
77386 + task->acl = kernel_role->root_label;
77387 + return;
77388 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
77389 + role = lookup_acl_role_label(task, uid, gid);
77390 +
77391 + /* don't change the role if we're not a privileged process */
77392 + if (role && task->role != role &&
77393 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
77394 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
77395 + return;
77396 +
77397 + /* perform subject lookup in possibly new role
77398 + we can use this result below in the case where role == task->role
77399 + */
77400 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
77401 +
77402 + /* if we changed uid/gid, but result in the same role
77403 + and are using inheritance, don't lose the inherited subject
77404 + if current subject is other than what normal lookup
77405 + would result in, we arrived via inheritance, don't
77406 + lose subject
77407 + */
77408 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
77409 + (subj == task->acl)))
77410 + task->acl = subj;
77411 +
77412 + task->role = role;
77413 +
77414 + task->is_writable = 0;
77415 +
77416 + /* ignore additional mmap checks for processes that are writable
77417 + by the default ACL */
77418 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
77419 + if (unlikely(obj->mode & GR_WRITE))
77420 + task->is_writable = 1;
77421 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
77422 + if (unlikely(obj->mode & GR_WRITE))
77423 + task->is_writable = 1;
77424 +
77425 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
77426 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
77427 +#endif
77428 +
77429 + gr_set_proc_res(task);
77430 +
77431 + return;
77432 +}
77433 +
77434 +int
77435 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
77436 + const int unsafe_flags)
77437 +{
77438 + struct task_struct *task = current;
77439 + struct acl_subject_label *newacl;
77440 + struct acl_object_label *obj;
77441 + __u32 retmode;
77442 +
77443 + if (unlikely(!(gr_status & GR_READY)))
77444 + return 0;
77445 +
77446 + newacl = chk_subj_label(dentry, mnt, task->role);
77447 +
77448 + task_lock(task);
77449 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
77450 + !(task->role->roletype & GR_ROLE_GOD) &&
77451 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
77452 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
77453 + task_unlock(task);
77454 + if (unsafe_flags & LSM_UNSAFE_SHARE)
77455 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
77456 + else
77457 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
77458 + return -EACCES;
77459 + }
77460 + task_unlock(task);
77461 +
77462 + obj = chk_obj_label(dentry, mnt, task->acl);
77463 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
77464 +
77465 + if (!(task->acl->mode & GR_INHERITLEARN) &&
77466 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
77467 + if (obj->nested)
77468 + task->acl = obj->nested;
77469 + else
77470 + task->acl = newacl;
77471 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
77472 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
77473 +
77474 + task->is_writable = 0;
77475 +
77476 + /* ignore additional mmap checks for processes that are writable
77477 + by the default ACL */
77478 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
77479 + if (unlikely(obj->mode & GR_WRITE))
77480 + task->is_writable = 1;
77481 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
77482 + if (unlikely(obj->mode & GR_WRITE))
77483 + task->is_writable = 1;
77484 +
77485 + gr_set_proc_res(task);
77486 +
77487 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
77488 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
77489 +#endif
77490 + return 0;
77491 +}
77492 +
77493 +/* always called with valid inodev ptr */
77494 +static void
77495 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
77496 +{
77497 + struct acl_object_label *matchpo;
77498 + struct acl_subject_label *matchps;
77499 + struct acl_subject_label *subj;
77500 + struct acl_role_label *role;
77501 + unsigned int x;
77502 +
77503 + FOR_EACH_ROLE_START(role)
77504 + FOR_EACH_SUBJECT_START(role, subj, x)
77505 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
77506 + matchpo->mode |= GR_DELETED;
77507 + FOR_EACH_SUBJECT_END(subj,x)
77508 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
77509 + if (subj->inode == ino && subj->device == dev)
77510 + subj->mode |= GR_DELETED;
77511 + FOR_EACH_NESTED_SUBJECT_END(subj)
77512 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
77513 + matchps->mode |= GR_DELETED;
77514 + FOR_EACH_ROLE_END(role)
77515 +
77516 + inodev->nentry->deleted = 1;
77517 +
77518 + return;
77519 +}
77520 +
77521 +void
77522 +gr_handle_delete(const ino_t ino, const dev_t dev)
77523 +{
77524 + struct inodev_entry *inodev;
77525 +
77526 + if (unlikely(!(gr_status & GR_READY)))
77527 + return;
77528 +
77529 + write_lock(&gr_inode_lock);
77530 + inodev = lookup_inodev_entry(ino, dev);
77531 + if (inodev != NULL)
77532 + do_handle_delete(inodev, ino, dev);
77533 + write_unlock(&gr_inode_lock);
77534 +
77535 + return;
77536 +}
77537 +
77538 +static void
77539 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
77540 + const ino_t newinode, const dev_t newdevice,
77541 + struct acl_subject_label *subj)
77542 +{
77543 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
77544 + struct acl_object_label *match;
77545 +
77546 + match = subj->obj_hash[index];
77547 +
77548 + while (match && (match->inode != oldinode ||
77549 + match->device != olddevice ||
77550 + !(match->mode & GR_DELETED)))
77551 + match = match->next;
77552 +
77553 + if (match && (match->inode == oldinode)
77554 + && (match->device == olddevice)
77555 + && (match->mode & GR_DELETED)) {
77556 + if (match->prev == NULL) {
77557 + subj->obj_hash[index] = match->next;
77558 + if (match->next != NULL)
77559 + match->next->prev = NULL;
77560 + } else {
77561 + match->prev->next = match->next;
77562 + if (match->next != NULL)
77563 + match->next->prev = match->prev;
77564 + }
77565 + match->prev = NULL;
77566 + match->next = NULL;
77567 + match->inode = newinode;
77568 + match->device = newdevice;
77569 + match->mode &= ~GR_DELETED;
77570 +
77571 + insert_acl_obj_label(match, subj);
77572 + }
77573 +
77574 + return;
77575 +}
77576 +
77577 +static void
77578 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
77579 + const ino_t newinode, const dev_t newdevice,
77580 + struct acl_role_label *role)
77581 +{
77582 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
77583 + struct acl_subject_label *match;
77584 +
77585 + match = role->subj_hash[index];
77586 +
77587 + while (match && (match->inode != oldinode ||
77588 + match->device != olddevice ||
77589 + !(match->mode & GR_DELETED)))
77590 + match = match->next;
77591 +
77592 + if (match && (match->inode == oldinode)
77593 + && (match->device == olddevice)
77594 + && (match->mode & GR_DELETED)) {
77595 + if (match->prev == NULL) {
77596 + role->subj_hash[index] = match->next;
77597 + if (match->next != NULL)
77598 + match->next->prev = NULL;
77599 + } else {
77600 + match->prev->next = match->next;
77601 + if (match->next != NULL)
77602 + match->next->prev = match->prev;
77603 + }
77604 + match->prev = NULL;
77605 + match->next = NULL;
77606 + match->inode = newinode;
77607 + match->device = newdevice;
77608 + match->mode &= ~GR_DELETED;
77609 +
77610 + insert_acl_subj_label(match, role);
77611 + }
77612 +
77613 + return;
77614 +}
77615 +
77616 +static void
77617 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
77618 + const ino_t newinode, const dev_t newdevice)
77619 +{
77620 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
77621 + struct inodev_entry *match;
77622 +
77623 + match = inodev_set.i_hash[index];
77624 +
77625 + while (match && (match->nentry->inode != oldinode ||
77626 + match->nentry->device != olddevice || !match->nentry->deleted))
77627 + match = match->next;
77628 +
77629 + if (match && (match->nentry->inode == oldinode)
77630 + && (match->nentry->device == olddevice) &&
77631 + match->nentry->deleted) {
77632 + if (match->prev == NULL) {
77633 + inodev_set.i_hash[index] = match->next;
77634 + if (match->next != NULL)
77635 + match->next->prev = NULL;
77636 + } else {
77637 + match->prev->next = match->next;
77638 + if (match->next != NULL)
77639 + match->next->prev = match->prev;
77640 + }
77641 + match->prev = NULL;
77642 + match->next = NULL;
77643 + match->nentry->inode = newinode;
77644 + match->nentry->device = newdevice;
77645 + match->nentry->deleted = 0;
77646 +
77647 + insert_inodev_entry(match);
77648 + }
77649 +
77650 + return;
77651 +}
77652 +
77653 +static void
77654 +__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
77655 +{
77656 + struct acl_subject_label *subj;
77657 + struct acl_role_label *role;
77658 + unsigned int x;
77659 +
77660 + FOR_EACH_ROLE_START(role)
77661 + update_acl_subj_label(matchn->inode, matchn->device,
77662 + inode, dev, role);
77663 +
77664 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
77665 + if ((subj->inode == inode) && (subj->device == dev)) {
77666 + subj->inode = inode;
77667 + subj->device = dev;
77668 + }
77669 + FOR_EACH_NESTED_SUBJECT_END(subj)
77670 + FOR_EACH_SUBJECT_START(role, subj, x)
77671 + update_acl_obj_label(matchn->inode, matchn->device,
77672 + inode, dev, subj);
77673 + FOR_EACH_SUBJECT_END(subj,x)
77674 + FOR_EACH_ROLE_END(role)
77675 +
77676 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
77677 +
77678 + return;
77679 +}
77680 +
77681 +static void
77682 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
77683 + const struct vfsmount *mnt)
77684 +{
77685 + ino_t ino = dentry->d_inode->i_ino;
77686 + dev_t dev = __get_dev(dentry);
77687 +
77688 + __do_handle_create(matchn, ino, dev);
77689 +
77690 + return;
77691 +}
77692 +
77693 +void
77694 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
77695 +{
77696 + struct name_entry *matchn;
77697 +
77698 + if (unlikely(!(gr_status & GR_READY)))
77699 + return;
77700 +
77701 + preempt_disable();
77702 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
77703 +
77704 + if (unlikely((unsigned long)matchn)) {
77705 + write_lock(&gr_inode_lock);
77706 + do_handle_create(matchn, dentry, mnt);
77707 + write_unlock(&gr_inode_lock);
77708 + }
77709 + preempt_enable();
77710 +
77711 + return;
77712 +}
77713 +
77714 +void
77715 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
77716 +{
77717 + struct name_entry *matchn;
77718 +
77719 + if (unlikely(!(gr_status & GR_READY)))
77720 + return;
77721 +
77722 + preempt_disable();
77723 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
77724 +
77725 + if (unlikely((unsigned long)matchn)) {
77726 + write_lock(&gr_inode_lock);
77727 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
77728 + write_unlock(&gr_inode_lock);
77729 + }
77730 + preempt_enable();
77731 +
77732 + return;
77733 +}
77734 +
77735 +void
77736 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
77737 + struct dentry *old_dentry,
77738 + struct dentry *new_dentry,
77739 + struct vfsmount *mnt, const __u8 replace)
77740 +{
77741 + struct name_entry *matchn;
77742 + struct inodev_entry *inodev;
77743 + struct inode *inode = new_dentry->d_inode;
77744 + ino_t oldinode = old_dentry->d_inode->i_ino;
77745 + dev_t olddev = __get_dev(old_dentry);
77746 +
77747 + /* vfs_rename swaps the name and parent link for old_dentry and
77748 + new_dentry
77749 + at this point, old_dentry has the new name, parent link, and inode
77750 + for the renamed file
77751 + if a file is being replaced by a rename, new_dentry has the inode
77752 + and name for the replaced file
77753 + */
77754 +
77755 + if (unlikely(!(gr_status & GR_READY)))
77756 + return;
77757 +
77758 + preempt_disable();
77759 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
77760 +
77761 + /* we wouldn't have to check d_inode if it weren't for
77762 + NFS silly-renaming
77763 + */
77764 +
77765 + write_lock(&gr_inode_lock);
77766 + if (unlikely(replace && inode)) {
77767 + ino_t newinode = inode->i_ino;
77768 + dev_t newdev = __get_dev(new_dentry);
77769 + inodev = lookup_inodev_entry(newinode, newdev);
77770 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
77771 + do_handle_delete(inodev, newinode, newdev);
77772 + }
77773 +
77774 + inodev = lookup_inodev_entry(oldinode, olddev);
77775 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
77776 + do_handle_delete(inodev, oldinode, olddev);
77777 +
77778 + if (unlikely((unsigned long)matchn))
77779 + do_handle_create(matchn, old_dentry, mnt);
77780 +
77781 + write_unlock(&gr_inode_lock);
77782 + preempt_enable();
77783 +
77784 + return;
77785 +}
77786 +
77787 +static int
77788 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
77789 + unsigned char **sum)
77790 +{
77791 + struct acl_role_label *r;
77792 + struct role_allowed_ip *ipp;
77793 + struct role_transition *trans;
77794 + unsigned int i;
77795 + int found = 0;
77796 + u32 curr_ip = current->signal->curr_ip;
77797 +
77798 + current->signal->saved_ip = curr_ip;
77799 +
77800 + /* check transition table */
77801 +
77802 + for (trans = current->role->transitions; trans; trans = trans->next) {
77803 + if (!strcmp(rolename, trans->rolename)) {
77804 + found = 1;
77805 + break;
77806 + }
77807 + }
77808 +
77809 + if (!found)
77810 + return 0;
77811 +
77812 + /* handle special roles that do not require authentication
77813 + and check ip */
77814 +
77815 + FOR_EACH_ROLE_START(r)
77816 + if (!strcmp(rolename, r->rolename) &&
77817 + (r->roletype & GR_ROLE_SPECIAL)) {
77818 + found = 0;
77819 + if (r->allowed_ips != NULL) {
77820 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
77821 + if ((ntohl(curr_ip) & ipp->netmask) ==
77822 + (ntohl(ipp->addr) & ipp->netmask))
77823 + found = 1;
77824 + }
77825 + } else
77826 + found = 2;
77827 + if (!found)
77828 + return 0;
77829 +
77830 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
77831 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
77832 + *salt = NULL;
77833 + *sum = NULL;
77834 + return 1;
77835 + }
77836 + }
77837 + FOR_EACH_ROLE_END(r)
77838 +
77839 + for (i = 0; i < num_sprole_pws; i++) {
77840 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
77841 + *salt = acl_special_roles[i]->salt;
77842 + *sum = acl_special_roles[i]->sum;
77843 + return 1;
77844 + }
77845 + }
77846 +
77847 + return 0;
77848 +}
77849 +
77850 +static void
77851 +assign_special_role(char *rolename)
77852 +{
77853 + struct acl_object_label *obj;
77854 + struct acl_role_label *r;
77855 + struct acl_role_label *assigned = NULL;
77856 + struct task_struct *tsk;
77857 + struct file *filp;
77858 +
77859 + FOR_EACH_ROLE_START(r)
77860 + if (!strcmp(rolename, r->rolename) &&
77861 + (r->roletype & GR_ROLE_SPECIAL)) {
77862 + assigned = r;
77863 + break;
77864 + }
77865 + FOR_EACH_ROLE_END(r)
77866 +
77867 + if (!assigned)
77868 + return;
77869 +
77870 + read_lock(&tasklist_lock);
77871 + read_lock(&grsec_exec_file_lock);
77872 +
77873 + tsk = current->real_parent;
77874 + if (tsk == NULL)
77875 + goto out_unlock;
77876 +
77877 + filp = tsk->exec_file;
77878 + if (filp == NULL)
77879 + goto out_unlock;
77880 +
77881 + tsk->is_writable = 0;
77882 +
77883 + tsk->acl_sp_role = 1;
77884 + tsk->acl_role_id = ++acl_sp_role_value;
77885 + tsk->role = assigned;
77886 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
77887 +
77888 + /* ignore additional mmap checks for processes that are writable
77889 + by the default ACL */
77890 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
77891 + if (unlikely(obj->mode & GR_WRITE))
77892 + tsk->is_writable = 1;
77893 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
77894 + if (unlikely(obj->mode & GR_WRITE))
77895 + tsk->is_writable = 1;
77896 +
77897 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
77898 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
77899 +#endif
77900 +
77901 +out_unlock:
77902 + read_unlock(&grsec_exec_file_lock);
77903 + read_unlock(&tasklist_lock);
77904 + return;
77905 +}
77906 +
77907 +int gr_check_secure_terminal(struct task_struct *task)
77908 +{
77909 + struct task_struct *p, *p2, *p3;
77910 + struct files_struct *files;
77911 + struct fdtable *fdt;
77912 + struct file *our_file = NULL, *file;
77913 + int i;
77914 +
77915 + if (task->signal->tty == NULL)
77916 + return 1;
77917 +
77918 + files = get_files_struct(task);
77919 + if (files != NULL) {
77920 + rcu_read_lock();
77921 + fdt = files_fdtable(files);
77922 + for (i=0; i < fdt->max_fds; i++) {
77923 + file = fcheck_files(files, i);
77924 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
77925 + get_file(file);
77926 + our_file = file;
77927 + }
77928 + }
77929 + rcu_read_unlock();
77930 + put_files_struct(files);
77931 + }
77932 +
77933 + if (our_file == NULL)
77934 + return 1;
77935 +
77936 + read_lock(&tasklist_lock);
77937 + do_each_thread(p2, p) {
77938 + files = get_files_struct(p);
77939 + if (files == NULL ||
77940 + (p->signal && p->signal->tty == task->signal->tty)) {
77941 + if (files != NULL)
77942 + put_files_struct(files);
77943 + continue;
77944 + }
77945 + rcu_read_lock();
77946 + fdt = files_fdtable(files);
77947 + for (i=0; i < fdt->max_fds; i++) {
77948 + file = fcheck_files(files, i);
77949 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
77950 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
77951 + p3 = task;
77952 + while (p3->pid > 0) {
77953 + if (p3 == p)
77954 + break;
77955 + p3 = p3->real_parent;
77956 + }
77957 + if (p3 == p)
77958 + break;
77959 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
77960 + gr_handle_alertkill(p);
77961 + rcu_read_unlock();
77962 + put_files_struct(files);
77963 + read_unlock(&tasklist_lock);
77964 + fput(our_file);
77965 + return 0;
77966 + }
77967 + }
77968 + rcu_read_unlock();
77969 + put_files_struct(files);
77970 + } while_each_thread(p2, p);
77971 + read_unlock(&tasklist_lock);
77972 +
77973 + fput(our_file);
77974 + return 1;
77975 +}
77976 +
77977 +ssize_t
77978 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
77979 +{
77980 + struct gr_arg_wrapper uwrap;
77981 + unsigned char *sprole_salt = NULL;
77982 + unsigned char *sprole_sum = NULL;
77983 + int error = sizeof (struct gr_arg_wrapper);
77984 + int error2 = 0;
77985 +
77986 + mutex_lock(&gr_dev_mutex);
77987 +
77988 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
77989 + error = -EPERM;
77990 + goto out;
77991 + }
77992 +
77993 + if (count != sizeof (struct gr_arg_wrapper)) {
77994 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
77995 + error = -EINVAL;
77996 + goto out;
77997 + }
77998 +
77999 +
78000 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
78001 + gr_auth_expires = 0;
78002 + gr_auth_attempts = 0;
78003 + }
78004 +
78005 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
78006 + error = -EFAULT;
78007 + goto out;
78008 + }
78009 +
78010 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
78011 + error = -EINVAL;
78012 + goto out;
78013 + }
78014 +
78015 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
78016 + error = -EFAULT;
78017 + goto out;
78018 + }
78019 +
78020 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
78021 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
78022 + time_after(gr_auth_expires, get_seconds())) {
78023 + error = -EBUSY;
78024 + goto out;
78025 + }
78026 +
78027 + /* if non-root trying to do anything other than use a special role,
78028 + do not attempt authentication, do not count towards authentication
78029 + locking
78030 + */
78031 +
78032 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
78033 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
78034 + current_uid()) {
78035 + error = -EPERM;
78036 + goto out;
78037 + }
78038 +
78039 + /* ensure pw and special role name are null terminated */
78040 +
78041 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
78042 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
78043 +
78044 + /* Okay.
78045 + * We have our enough of the argument structure..(we have yet
78046 + * to copy_from_user the tables themselves) . Copy the tables
78047 + * only if we need them, i.e. for loading operations. */
78048 +
78049 + switch (gr_usermode->mode) {
78050 + case GR_STATUS:
78051 + if (gr_status & GR_READY) {
78052 + error = 1;
78053 + if (!gr_check_secure_terminal(current))
78054 + error = 3;
78055 + } else
78056 + error = 2;
78057 + goto out;
78058 + case GR_SHUTDOWN:
78059 + if ((gr_status & GR_READY)
78060 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
78061 + pax_open_kernel();
78062 + gr_status &= ~GR_READY;
78063 + pax_close_kernel();
78064 +
78065 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
78066 + free_variables();
78067 + memset(gr_usermode, 0, sizeof (struct gr_arg));
78068 + memset(gr_system_salt, 0, GR_SALT_LEN);
78069 + memset(gr_system_sum, 0, GR_SHA_LEN);
78070 + } else if (gr_status & GR_READY) {
78071 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
78072 + error = -EPERM;
78073 + } else {
78074 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
78075 + error = -EAGAIN;
78076 + }
78077 + break;
78078 + case GR_ENABLE:
78079 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
78080 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
78081 + else {
78082 + if (gr_status & GR_READY)
78083 + error = -EAGAIN;
78084 + else
78085 + error = error2;
78086 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
78087 + }
78088 + break;
78089 + case GR_RELOAD:
78090 + if (!(gr_status & GR_READY)) {
78091 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
78092 + error = -EAGAIN;
78093 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
78094 + lock_kernel();
78095 +
78096 + pax_open_kernel();
78097 + gr_status &= ~GR_READY;
78098 + pax_close_kernel();
78099 +
78100 + free_variables();
78101 + if (!(error2 = gracl_init(gr_usermode))) {
78102 + unlock_kernel();
78103 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
78104 + } else {
78105 + unlock_kernel();
78106 + error = error2;
78107 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
78108 + }
78109 + } else {
78110 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
78111 + error = -EPERM;
78112 + }
78113 + break;
78114 + case GR_SEGVMOD:
78115 + if (unlikely(!(gr_status & GR_READY))) {
78116 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
78117 + error = -EAGAIN;
78118 + break;
78119 + }
78120 +
78121 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
78122 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
78123 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
78124 + struct acl_subject_label *segvacl;
78125 + segvacl =
78126 + lookup_acl_subj_label(gr_usermode->segv_inode,
78127 + gr_usermode->segv_device,
78128 + current->role);
78129 + if (segvacl) {
78130 + segvacl->crashes = 0;
78131 + segvacl->expires = 0;
78132 + }
78133 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
78134 + gr_remove_uid(gr_usermode->segv_uid);
78135 + }
78136 + } else {
78137 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
78138 + error = -EPERM;
78139 + }
78140 + break;
78141 + case GR_SPROLE:
78142 + case GR_SPROLEPAM:
78143 + if (unlikely(!(gr_status & GR_READY))) {
78144 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
78145 + error = -EAGAIN;
78146 + break;
78147 + }
78148 +
78149 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
78150 + current->role->expires = 0;
78151 + current->role->auth_attempts = 0;
78152 + }
78153 +
78154 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
78155 + time_after(current->role->expires, get_seconds())) {
78156 + error = -EBUSY;
78157 + goto out;
78158 + }
78159 +
78160 + if (lookup_special_role_auth
78161 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
78162 + && ((!sprole_salt && !sprole_sum)
78163 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
78164 + char *p = "";
78165 + assign_special_role(gr_usermode->sp_role);
78166 + read_lock(&tasklist_lock);
78167 + if (current->real_parent)
78168 + p = current->real_parent->role->rolename;
78169 + read_unlock(&tasklist_lock);
78170 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
78171 + p, acl_sp_role_value);
78172 + } else {
78173 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
78174 + error = -EPERM;
78175 + if(!(current->role->auth_attempts++))
78176 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
78177 +
78178 + goto out;
78179 + }
78180 + break;
78181 + case GR_UNSPROLE:
78182 + if (unlikely(!(gr_status & GR_READY))) {
78183 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
78184 + error = -EAGAIN;
78185 + break;
78186 + }
78187 +
78188 + if (current->role->roletype & GR_ROLE_SPECIAL) {
78189 + char *p = "";
78190 + int i = 0;
78191 +
78192 + read_lock(&tasklist_lock);
78193 + if (current->real_parent) {
78194 + p = current->real_parent->role->rolename;
78195 + i = current->real_parent->acl_role_id;
78196 + }
78197 + read_unlock(&tasklist_lock);
78198 +
78199 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
78200 + gr_set_acls(1);
78201 + } else {
78202 + error = -EPERM;
78203 + goto out;
78204 + }
78205 + break;
78206 + default:
78207 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
78208 + error = -EINVAL;
78209 + break;
78210 + }
78211 +
78212 + if (error != -EPERM)
78213 + goto out;
78214 +
78215 + if(!(gr_auth_attempts++))
78216 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
78217 +
78218 + out:
78219 + mutex_unlock(&gr_dev_mutex);
78220 + return error;
78221 +}
78222 +
78223 +/* must be called with
78224 + rcu_read_lock();
78225 + read_lock(&tasklist_lock);
78226 + read_lock(&grsec_exec_file_lock);
78227 +*/
78228 +int gr_apply_subject_to_task(struct task_struct *task)
78229 +{
78230 + struct acl_object_label *obj;
78231 + char *tmpname;
78232 + struct acl_subject_label *tmpsubj;
78233 + struct file *filp;
78234 + struct name_entry *nmatch;
78235 +
78236 + filp = task->exec_file;
78237 + if (filp == NULL)
78238 + return 0;
78239 +
78240 + /* the following is to apply the correct subject
78241 + on binaries running when the RBAC system
78242 + is enabled, when the binaries have been
78243 + replaced or deleted since their execution
78244 + -----
78245 + when the RBAC system starts, the inode/dev
78246 + from exec_file will be one the RBAC system
78247 + is unaware of. It only knows the inode/dev
78248 + of the present file on disk, or the absence
78249 + of it.
78250 + */
78251 + preempt_disable();
78252 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
78253 +
78254 + nmatch = lookup_name_entry(tmpname);
78255 + preempt_enable();
78256 + tmpsubj = NULL;
78257 + if (nmatch) {
78258 + if (nmatch->deleted)
78259 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
78260 + else
78261 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
78262 + if (tmpsubj != NULL)
78263 + task->acl = tmpsubj;
78264 + }
78265 + if (tmpsubj == NULL)
78266 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
78267 + task->role);
78268 + if (task->acl) {
78269 + task->is_writable = 0;
78270 + /* ignore additional mmap checks for processes that are writable
78271 + by the default ACL */
78272 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
78273 + if (unlikely(obj->mode & GR_WRITE))
78274 + task->is_writable = 1;
78275 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
78276 + if (unlikely(obj->mode & GR_WRITE))
78277 + task->is_writable = 1;
78278 +
78279 + gr_set_proc_res(task);
78280 +
78281 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
78282 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
78283 +#endif
78284 + } else {
78285 + return 1;
78286 + }
78287 +
78288 + return 0;
78289 +}
78290 +
78291 +int
78292 +gr_set_acls(const int type)
78293 +{
78294 + struct task_struct *task, *task2;
78295 + struct acl_role_label *role = current->role;
78296 + __u16 acl_role_id = current->acl_role_id;
78297 + const struct cred *cred;
78298 + int ret;
78299 +
78300 + rcu_read_lock();
78301 + read_lock(&tasklist_lock);
78302 + read_lock(&grsec_exec_file_lock);
78303 + do_each_thread(task2, task) {
78304 + /* check to see if we're called from the exit handler,
78305 + if so, only replace ACLs that have inherited the admin
78306 + ACL */
78307 +
78308 + if (type && (task->role != role ||
78309 + task->acl_role_id != acl_role_id))
78310 + continue;
78311 +
78312 + task->acl_role_id = 0;
78313 + task->acl_sp_role = 0;
78314 +
78315 + if (task->exec_file) {
78316 + cred = __task_cred(task);
78317 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
78318 +
78319 + ret = gr_apply_subject_to_task(task);
78320 + if (ret) {
78321 + read_unlock(&grsec_exec_file_lock);
78322 + read_unlock(&tasklist_lock);
78323 + rcu_read_unlock();
78324 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
78325 + return ret;
78326 + }
78327 + } else {
78328 + // it's a kernel process
78329 + task->role = kernel_role;
78330 + task->acl = kernel_role->root_label;
78331 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
78332 + task->acl->mode &= ~GR_PROCFIND;
78333 +#endif
78334 + }
78335 + } while_each_thread(task2, task);
78336 + read_unlock(&grsec_exec_file_lock);
78337 + read_unlock(&tasklist_lock);
78338 + rcu_read_unlock();
78339 +
78340 + return 0;
78341 +}
78342 +
78343 +void
78344 +gr_learn_resource(const struct task_struct *task,
78345 + const int res, const unsigned long wanted, const int gt)
78346 +{
78347 + struct acl_subject_label *acl;
78348 + const struct cred *cred;
78349 +
78350 + if (unlikely((gr_status & GR_READY) &&
78351 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
78352 + goto skip_reslog;
78353 +
78354 +#ifdef CONFIG_GRKERNSEC_RESLOG
78355 + gr_log_resource(task, res, wanted, gt);
78356 +#endif
78357 + skip_reslog:
78358 +
78359 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
78360 + return;
78361 +
78362 + acl = task->acl;
78363 +
78364 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
78365 + !(acl->resmask & (1 << (unsigned short) res))))
78366 + return;
78367 +
78368 + if (wanted >= acl->res[res].rlim_cur) {
78369 + unsigned long res_add;
78370 +
78371 + res_add = wanted;
78372 + switch (res) {
78373 + case RLIMIT_CPU:
78374 + res_add += GR_RLIM_CPU_BUMP;
78375 + break;
78376 + case RLIMIT_FSIZE:
78377 + res_add += GR_RLIM_FSIZE_BUMP;
78378 + break;
78379 + case RLIMIT_DATA:
78380 + res_add += GR_RLIM_DATA_BUMP;
78381 + break;
78382 + case RLIMIT_STACK:
78383 + res_add += GR_RLIM_STACK_BUMP;
78384 + break;
78385 + case RLIMIT_CORE:
78386 + res_add += GR_RLIM_CORE_BUMP;
78387 + break;
78388 + case RLIMIT_RSS:
78389 + res_add += GR_RLIM_RSS_BUMP;
78390 + break;
78391 + case RLIMIT_NPROC:
78392 + res_add += GR_RLIM_NPROC_BUMP;
78393 + break;
78394 + case RLIMIT_NOFILE:
78395 + res_add += GR_RLIM_NOFILE_BUMP;
78396 + break;
78397 + case RLIMIT_MEMLOCK:
78398 + res_add += GR_RLIM_MEMLOCK_BUMP;
78399 + break;
78400 + case RLIMIT_AS:
78401 + res_add += GR_RLIM_AS_BUMP;
78402 + break;
78403 + case RLIMIT_LOCKS:
78404 + res_add += GR_RLIM_LOCKS_BUMP;
78405 + break;
78406 + case RLIMIT_SIGPENDING:
78407 + res_add += GR_RLIM_SIGPENDING_BUMP;
78408 + break;
78409 + case RLIMIT_MSGQUEUE:
78410 + res_add += GR_RLIM_MSGQUEUE_BUMP;
78411 + break;
78412 + case RLIMIT_NICE:
78413 + res_add += GR_RLIM_NICE_BUMP;
78414 + break;
78415 + case RLIMIT_RTPRIO:
78416 + res_add += GR_RLIM_RTPRIO_BUMP;
78417 + break;
78418 + case RLIMIT_RTTIME:
78419 + res_add += GR_RLIM_RTTIME_BUMP;
78420 + break;
78421 + }
78422 +
78423 + acl->res[res].rlim_cur = res_add;
78424 +
78425 + if (wanted > acl->res[res].rlim_max)
78426 + acl->res[res].rlim_max = res_add;
78427 +
78428 + /* only log the subject filename, since resource logging is supported for
78429 + single-subject learning only */
78430 + rcu_read_lock();
78431 + cred = __task_cred(task);
78432 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
78433 + task->role->roletype, cred->uid, cred->gid, acl->filename,
78434 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
78435 + "", (unsigned long) res, &task->signal->saved_ip);
78436 + rcu_read_unlock();
78437 + }
78438 +
78439 + return;
78440 +}
78441 +
78442 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
78443 +void
78444 +pax_set_initial_flags(struct linux_binprm *bprm)
78445 +{
78446 + struct task_struct *task = current;
78447 + struct acl_subject_label *proc;
78448 + unsigned long flags;
78449 +
78450 + if (unlikely(!(gr_status & GR_READY)))
78451 + return;
78452 +
78453 + flags = pax_get_flags(task);
78454 +
78455 + proc = task->acl;
78456 +
78457 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
78458 + flags &= ~MF_PAX_PAGEEXEC;
78459 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
78460 + flags &= ~MF_PAX_SEGMEXEC;
78461 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
78462 + flags &= ~MF_PAX_RANDMMAP;
78463 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
78464 + flags &= ~MF_PAX_EMUTRAMP;
78465 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
78466 + flags &= ~MF_PAX_MPROTECT;
78467 +
78468 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
78469 + flags |= MF_PAX_PAGEEXEC;
78470 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
78471 + flags |= MF_PAX_SEGMEXEC;
78472 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
78473 + flags |= MF_PAX_RANDMMAP;
78474 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
78475 + flags |= MF_PAX_EMUTRAMP;
78476 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
78477 + flags |= MF_PAX_MPROTECT;
78478 +
78479 + pax_set_flags(task, flags);
78480 +
78481 + return;
78482 +}
78483 +#endif
78484 +
78485 +#ifdef CONFIG_SYSCTL
78486 +/* Eric Biederman likes breaking userland ABI and every inode-based security
78487 + system to save 35kb of memory */
78488 +
78489 +/* we modify the passed in filename, but adjust it back before returning */
78490 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
78491 +{
78492 + struct name_entry *nmatch;
78493 + char *p, *lastp = NULL;
78494 + struct acl_object_label *obj = NULL, *tmp;
78495 + struct acl_subject_label *tmpsubj;
78496 + char c = '\0';
78497 +
78498 + read_lock(&gr_inode_lock);
78499 +
78500 + p = name + len - 1;
78501 + do {
78502 + nmatch = lookup_name_entry(name);
78503 + if (lastp != NULL)
78504 + *lastp = c;
78505 +
78506 + if (nmatch == NULL)
78507 + goto next_component;
78508 + tmpsubj = current->acl;
78509 + do {
78510 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
78511 + if (obj != NULL) {
78512 + tmp = obj->globbed;
78513 + while (tmp) {
78514 + if (!glob_match(tmp->filename, name)) {
78515 + obj = tmp;
78516 + goto found_obj;
78517 + }
78518 + tmp = tmp->next;
78519 + }
78520 + goto found_obj;
78521 + }
78522 + } while ((tmpsubj = tmpsubj->parent_subject));
78523 +next_component:
78524 + /* end case */
78525 + if (p == name)
78526 + break;
78527 +
78528 + while (*p != '/')
78529 + p--;
78530 + if (p == name)
78531 + lastp = p + 1;
78532 + else {
78533 + lastp = p;
78534 + p--;
78535 + }
78536 + c = *lastp;
78537 + *lastp = '\0';
78538 + } while (1);
78539 +found_obj:
78540 + read_unlock(&gr_inode_lock);
78541 + /* obj returned will always be non-null */
78542 + return obj;
78543 +}
78544 +
78545 +/* returns 0 when allowing, non-zero on error
78546 + op of 0 is used for readdir, so we don't log the names of hidden files
78547 +*/
78548 +__u32
78549 +gr_handle_sysctl(const struct ctl_table *table, const int op)
78550 +{
78551 + ctl_table *tmp;
78552 + const char *proc_sys = "/proc/sys";
78553 + char *path;
78554 + struct acl_object_label *obj;
78555 + unsigned short len = 0, pos = 0, depth = 0, i;
78556 + __u32 err = 0;
78557 + __u32 mode = 0;
78558 +
78559 + if (unlikely(!(gr_status & GR_READY)))
78560 + return 0;
78561 +
78562 + /* for now, ignore operations on non-sysctl entries if it's not a
78563 + readdir*/
78564 + if (table->child != NULL && op != 0)
78565 + return 0;
78566 +
78567 + mode |= GR_FIND;
78568 + /* it's only a read if it's an entry, read on dirs is for readdir */
78569 + if (op & MAY_READ)
78570 + mode |= GR_READ;
78571 + if (op & MAY_WRITE)
78572 + mode |= GR_WRITE;
78573 +
78574 + preempt_disable();
78575 +
78576 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
78577 +
78578 + /* it's only a read/write if it's an actual entry, not a dir
78579 + (which are opened for readdir)
78580 + */
78581 +
78582 + /* convert the requested sysctl entry into a pathname */
78583 +
78584 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
78585 + len += strlen(tmp->procname);
78586 + len++;
78587 + depth++;
78588 + }
78589 +
78590 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
78591 + /* deny */
78592 + goto out;
78593 + }
78594 +
78595 + memset(path, 0, PAGE_SIZE);
78596 +
78597 + memcpy(path, proc_sys, strlen(proc_sys));
78598 +
78599 + pos += strlen(proc_sys);
78600 +
78601 + for (; depth > 0; depth--) {
78602 + path[pos] = '/';
78603 + pos++;
78604 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
78605 + if (depth == i) {
78606 + memcpy(path + pos, tmp->procname,
78607 + strlen(tmp->procname));
78608 + pos += strlen(tmp->procname);
78609 + }
78610 + i++;
78611 + }
78612 + }
78613 +
78614 + obj = gr_lookup_by_name(path, pos);
78615 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
78616 +
78617 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
78618 + ((err & mode) != mode))) {
78619 + __u32 new_mode = mode;
78620 +
78621 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
78622 +
78623 + err = 0;
78624 + gr_log_learn_sysctl(path, new_mode);
78625 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
78626 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
78627 + err = -ENOENT;
78628 + } else if (!(err & GR_FIND)) {
78629 + err = -ENOENT;
78630 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
78631 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
78632 + path, (mode & GR_READ) ? " reading" : "",
78633 + (mode & GR_WRITE) ? " writing" : "");
78634 + err = -EACCES;
78635 + } else if ((err & mode) != mode) {
78636 + err = -EACCES;
78637 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
78638 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
78639 + path, (mode & GR_READ) ? " reading" : "",
78640 + (mode & GR_WRITE) ? " writing" : "");
78641 + err = 0;
78642 + } else
78643 + err = 0;
78644 +
78645 + out:
78646 + preempt_enable();
78647 +
78648 + return err;
78649 +}
78650 +#endif
78651 +
78652 +int
78653 +gr_handle_proc_ptrace(struct task_struct *task)
78654 +{
78655 + struct file *filp;
78656 + struct task_struct *tmp = task;
78657 + struct task_struct *curtemp = current;
78658 + __u32 retmode;
78659 +
78660 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
78661 + if (unlikely(!(gr_status & GR_READY)))
78662 + return 0;
78663 +#endif
78664 +
78665 + read_lock(&tasklist_lock);
78666 + read_lock(&grsec_exec_file_lock);
78667 + filp = task->exec_file;
78668 +
78669 + while (tmp->pid > 0) {
78670 + if (tmp == curtemp)
78671 + break;
78672 + tmp = tmp->real_parent;
78673 + }
78674 +
78675 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
78676 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
78677 + read_unlock(&grsec_exec_file_lock);
78678 + read_unlock(&tasklist_lock);
78679 + return 1;
78680 + }
78681 +
78682 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78683 + if (!(gr_status & GR_READY)) {
78684 + read_unlock(&grsec_exec_file_lock);
78685 + read_unlock(&tasklist_lock);
78686 + return 0;
78687 + }
78688 +#endif
78689 +
78690 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
78691 + read_unlock(&grsec_exec_file_lock);
78692 + read_unlock(&tasklist_lock);
78693 +
78694 + if (retmode & GR_NOPTRACE)
78695 + return 1;
78696 +
78697 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
78698 + && (current->acl != task->acl || (current->acl != current->role->root_label
78699 + && current->pid != task->pid)))
78700 + return 1;
78701 +
78702 + return 0;
78703 +}
78704 +
78705 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
78706 +{
78707 + if (unlikely(!(gr_status & GR_READY)))
78708 + return;
78709 +
78710 + if (!(current->role->roletype & GR_ROLE_GOD))
78711 + return;
78712 +
78713 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
78714 + p->role->rolename, gr_task_roletype_to_char(p),
78715 + p->acl->filename);
78716 +}
78717 +
78718 +int
78719 +gr_handle_ptrace(struct task_struct *task, const long request)
78720 +{
78721 + struct task_struct *tmp = task;
78722 + struct task_struct *curtemp = current;
78723 + __u32 retmode;
78724 +
78725 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
78726 + if (unlikely(!(gr_status & GR_READY)))
78727 + return 0;
78728 +#endif
78729 +
78730 + read_lock(&tasklist_lock);
78731 + while (tmp->pid > 0) {
78732 + if (tmp == curtemp)
78733 + break;
78734 + tmp = tmp->real_parent;
78735 + }
78736 +
78737 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
78738 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
78739 + read_unlock(&tasklist_lock);
78740 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
78741 + return 1;
78742 + }
78743 + read_unlock(&tasklist_lock);
78744 +
78745 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78746 + if (!(gr_status & GR_READY))
78747 + return 0;
78748 +#endif
78749 +
78750 + read_lock(&grsec_exec_file_lock);
78751 + if (unlikely(!task->exec_file)) {
78752 + read_unlock(&grsec_exec_file_lock);
78753 + return 0;
78754 + }
78755 +
78756 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
78757 + read_unlock(&grsec_exec_file_lock);
78758 +
78759 + if (retmode & GR_NOPTRACE) {
78760 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
78761 + return 1;
78762 + }
78763 +
78764 + if (retmode & GR_PTRACERD) {
78765 + switch (request) {
78766 + case PTRACE_POKETEXT:
78767 + case PTRACE_POKEDATA:
78768 + case PTRACE_POKEUSR:
78769 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
78770 + case PTRACE_SETREGS:
78771 + case PTRACE_SETFPREGS:
78772 +#endif
78773 +#ifdef CONFIG_X86
78774 + case PTRACE_SETFPXREGS:
78775 +#endif
78776 +#ifdef CONFIG_ALTIVEC
78777 + case PTRACE_SETVRREGS:
78778 +#endif
78779 + return 1;
78780 + default:
78781 + return 0;
78782 + }
78783 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
78784 + !(current->role->roletype & GR_ROLE_GOD) &&
78785 + (current->acl != task->acl)) {
78786 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
78787 + return 1;
78788 + }
78789 +
78790 + return 0;
78791 +}
78792 +
78793 +static int is_writable_mmap(const struct file *filp)
78794 +{
78795 + struct task_struct *task = current;
78796 + struct acl_object_label *obj, *obj2;
78797 +
78798 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
78799 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
78800 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
78801 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
78802 + task->role->root_label);
78803 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
78804 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
78805 + return 1;
78806 + }
78807 + }
78808 + return 0;
78809 +}
78810 +
78811 +int
78812 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
78813 +{
78814 + __u32 mode;
78815 +
78816 + if (unlikely(!file || !(prot & PROT_EXEC)))
78817 + return 1;
78818 +
78819 + if (is_writable_mmap(file))
78820 + return 0;
78821 +
78822 + mode =
78823 + gr_search_file(file->f_path.dentry,
78824 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
78825 + file->f_path.mnt);
78826 +
78827 + if (!gr_tpe_allow(file))
78828 + return 0;
78829 +
78830 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
78831 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78832 + return 0;
78833 + } else if (unlikely(!(mode & GR_EXEC))) {
78834 + return 0;
78835 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
78836 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78837 + return 1;
78838 + }
78839 +
78840 + return 1;
78841 +}
78842 +
78843 +int
78844 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
78845 +{
78846 + __u32 mode;
78847 +
78848 + if (unlikely(!file || !(prot & PROT_EXEC)))
78849 + return 1;
78850 +
78851 + if (is_writable_mmap(file))
78852 + return 0;
78853 +
78854 + mode =
78855 + gr_search_file(file->f_path.dentry,
78856 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
78857 + file->f_path.mnt);
78858 +
78859 + if (!gr_tpe_allow(file))
78860 + return 0;
78861 +
78862 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
78863 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78864 + return 0;
78865 + } else if (unlikely(!(mode & GR_EXEC))) {
78866 + return 0;
78867 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
78868 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78869 + return 1;
78870 + }
78871 +
78872 + return 1;
78873 +}
78874 +
78875 +void
78876 +gr_acl_handle_psacct(struct task_struct *task, const long code)
78877 +{
78878 + unsigned long runtime;
78879 + unsigned long cputime;
78880 + unsigned int wday, cday;
78881 + __u8 whr, chr;
78882 + __u8 wmin, cmin;
78883 + __u8 wsec, csec;
78884 + struct timespec timeval;
78885 +
78886 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
78887 + !(task->acl->mode & GR_PROCACCT)))
78888 + return;
78889 +
78890 + do_posix_clock_monotonic_gettime(&timeval);
78891 + runtime = timeval.tv_sec - task->start_time.tv_sec;
78892 + wday = runtime / (3600 * 24);
78893 + runtime -= wday * (3600 * 24);
78894 + whr = runtime / 3600;
78895 + runtime -= whr * 3600;
78896 + wmin = runtime / 60;
78897 + runtime -= wmin * 60;
78898 + wsec = runtime;
78899 +
78900 + cputime = (task->utime + task->stime) / HZ;
78901 + cday = cputime / (3600 * 24);
78902 + cputime -= cday * (3600 * 24);
78903 + chr = cputime / 3600;
78904 + cputime -= chr * 3600;
78905 + cmin = cputime / 60;
78906 + cputime -= cmin * 60;
78907 + csec = cputime;
78908 +
78909 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
78910 +
78911 + return;
78912 +}
78913 +
78914 +void gr_set_kernel_label(struct task_struct *task)
78915 +{
78916 + if (gr_status & GR_READY) {
78917 + task->role = kernel_role;
78918 + task->acl = kernel_role->root_label;
78919 + }
78920 + return;
78921 +}
78922 +
78923 +#ifdef CONFIG_TASKSTATS
78924 +int gr_is_taskstats_denied(int pid)
78925 +{
78926 + struct task_struct *task;
78927 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78928 + const struct cred *cred;
78929 +#endif
78930 + int ret = 0;
78931 +
78932 + /* restrict taskstats viewing to un-chrooted root users
78933 + who have the 'view' subject flag if the RBAC system is enabled
78934 + */
78935 +
78936 + rcu_read_lock();
78937 + read_lock(&tasklist_lock);
78938 + task = find_task_by_vpid(pid);
78939 + if (task) {
78940 +#ifdef CONFIG_GRKERNSEC_CHROOT
78941 + if (proc_is_chrooted(task))
78942 + ret = -EACCES;
78943 +#endif
78944 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78945 + cred = __task_cred(task);
78946 +#ifdef CONFIG_GRKERNSEC_PROC_USER
78947 + if (cred->uid != 0)
78948 + ret = -EACCES;
78949 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78950 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
78951 + ret = -EACCES;
78952 +#endif
78953 +#endif
78954 + if (gr_status & GR_READY) {
78955 + if (!(task->acl->mode & GR_VIEW))
78956 + ret = -EACCES;
78957 + }
78958 + } else
78959 + ret = -ENOENT;
78960 +
78961 + read_unlock(&tasklist_lock);
78962 + rcu_read_unlock();
78963 +
78964 + return ret;
78965 +}
78966 +#endif
78967 +
78968 +/* AUXV entries are filled via a descendant of search_binary_handler
78969 + after we've already applied the subject for the target
78970 +*/
78971 +int gr_acl_enable_at_secure(void)
78972 +{
78973 + if (unlikely(!(gr_status & GR_READY)))
78974 + return 0;
78975 +
78976 + if (current->acl->mode & GR_ATSECURE)
78977 + return 1;
78978 +
78979 + return 0;
78980 +}
78981 +
78982 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
78983 +{
78984 + struct task_struct *task = current;
78985 + struct dentry *dentry = file->f_path.dentry;
78986 + struct vfsmount *mnt = file->f_path.mnt;
78987 + struct acl_object_label *obj, *tmp;
78988 + struct acl_subject_label *subj;
78989 + unsigned int bufsize;
78990 + int is_not_root;
78991 + char *path;
78992 + dev_t dev = __get_dev(dentry);
78993 +
78994 + if (unlikely(!(gr_status & GR_READY)))
78995 + return 1;
78996 +
78997 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
78998 + return 1;
78999 +
79000 + /* ignore Eric Biederman */
79001 + if (IS_PRIVATE(dentry->d_inode))
79002 + return 1;
79003 +
79004 + subj = task->acl;
79005 + do {
79006 + obj = lookup_acl_obj_label(ino, dev, subj);
79007 + if (obj != NULL)
79008 + return (obj->mode & GR_FIND) ? 1 : 0;
79009 + } while ((subj = subj->parent_subject));
79010 +
79011 + /* this is purely an optimization since we're looking for an object
79012 + for the directory we're doing a readdir on
79013 + if it's possible for any globbed object to match the entry we're
79014 + filling into the directory, then the object we find here will be
79015 + an anchor point with attached globbed objects
79016 + */
79017 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
79018 + if (obj->globbed == NULL)
79019 + return (obj->mode & GR_FIND) ? 1 : 0;
79020 +
79021 + is_not_root = ((obj->filename[0] == '/') &&
79022 + (obj->filename[1] == '\0')) ? 0 : 1;
79023 + bufsize = PAGE_SIZE - namelen - is_not_root;
79024 +
79025 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
79026 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
79027 + return 1;
79028 +
79029 + preempt_disable();
79030 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
79031 + bufsize);
79032 +
79033 + bufsize = strlen(path);
79034 +
79035 + /* if base is "/", don't append an additional slash */
79036 + if (is_not_root)
79037 + *(path + bufsize) = '/';
79038 + memcpy(path + bufsize + is_not_root, name, namelen);
79039 + *(path + bufsize + namelen + is_not_root) = '\0';
79040 +
79041 + tmp = obj->globbed;
79042 + while (tmp) {
79043 + if (!glob_match(tmp->filename, path)) {
79044 + preempt_enable();
79045 + return (tmp->mode & GR_FIND) ? 1 : 0;
79046 + }
79047 + tmp = tmp->next;
79048 + }
79049 + preempt_enable();
79050 + return (obj->mode & GR_FIND) ? 1 : 0;
79051 +}
79052 +
79053 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
79054 +EXPORT_SYMBOL(gr_acl_is_enabled);
79055 +#endif
79056 +EXPORT_SYMBOL(gr_learn_resource);
79057 +EXPORT_SYMBOL(gr_set_kernel_label);
79058 +#ifdef CONFIG_SECURITY
79059 +EXPORT_SYMBOL(gr_check_user_change);
79060 +EXPORT_SYMBOL(gr_check_group_change);
79061 +#endif
79062 +
79063 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
79064 new file mode 100644
79065 index 0000000..34fefda
79066 --- /dev/null
79067 +++ b/grsecurity/gracl_alloc.c
79068 @@ -0,0 +1,105 @@
79069 +#include <linux/kernel.h>
79070 +#include <linux/mm.h>
79071 +#include <linux/slab.h>
79072 +#include <linux/vmalloc.h>
79073 +#include <linux/gracl.h>
79074 +#include <linux/grsecurity.h>
79075 +
79076 +static unsigned long alloc_stack_next = 1;
79077 +static unsigned long alloc_stack_size = 1;
79078 +static void **alloc_stack;
79079 +
79080 +static __inline__ int
79081 +alloc_pop(void)
79082 +{
79083 + if (alloc_stack_next == 1)
79084 + return 0;
79085 +
79086 + kfree(alloc_stack[alloc_stack_next - 2]);
79087 +
79088 + alloc_stack_next--;
79089 +
79090 + return 1;
79091 +}
79092 +
79093 +static __inline__ int
79094 +alloc_push(void *buf)
79095 +{
79096 + if (alloc_stack_next >= alloc_stack_size)
79097 + return 1;
79098 +
79099 + alloc_stack[alloc_stack_next - 1] = buf;
79100 +
79101 + alloc_stack_next++;
79102 +
79103 + return 0;
79104 +}
79105 +
79106 +void *
79107 +acl_alloc(unsigned long len)
79108 +{
79109 + void *ret = NULL;
79110 +
79111 + if (!len || len > PAGE_SIZE)
79112 + goto out;
79113 +
79114 + ret = kmalloc(len, GFP_KERNEL);
79115 +
79116 + if (ret) {
79117 + if (alloc_push(ret)) {
79118 + kfree(ret);
79119 + ret = NULL;
79120 + }
79121 + }
79122 +
79123 +out:
79124 + return ret;
79125 +}
79126 +
79127 +void *
79128 +acl_alloc_num(unsigned long num, unsigned long len)
79129 +{
79130 + if (!len || (num > (PAGE_SIZE / len)))
79131 + return NULL;
79132 +
79133 + return acl_alloc(num * len);
79134 +}
79135 +
79136 +void
79137 +acl_free_all(void)
79138 +{
79139 + if (gr_acl_is_enabled() || !alloc_stack)
79140 + return;
79141 +
79142 + while (alloc_pop()) ;
79143 +
79144 + if (alloc_stack) {
79145 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
79146 + kfree(alloc_stack);
79147 + else
79148 + vfree(alloc_stack);
79149 + }
79150 +
79151 + alloc_stack = NULL;
79152 + alloc_stack_size = 1;
79153 + alloc_stack_next = 1;
79154 +
79155 + return;
79156 +}
79157 +
79158 +int
79159 +acl_alloc_stack_init(unsigned long size)
79160 +{
79161 + if ((size * sizeof (void *)) <= PAGE_SIZE)
79162 + alloc_stack =
79163 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
79164 + else
79165 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
79166 +
79167 + alloc_stack_size = size;
79168 +
79169 + if (!alloc_stack)
79170 + return 0;
79171 + else
79172 + return 1;
79173 +}
79174 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
79175 new file mode 100644
79176 index 0000000..955ddfb
79177 --- /dev/null
79178 +++ b/grsecurity/gracl_cap.c
79179 @@ -0,0 +1,101 @@
79180 +#include <linux/kernel.h>
79181 +#include <linux/module.h>
79182 +#include <linux/sched.h>
79183 +#include <linux/gracl.h>
79184 +#include <linux/grsecurity.h>
79185 +#include <linux/grinternal.h>
79186 +
79187 +extern const char *captab_log[];
79188 +extern int captab_log_entries;
79189 +
79190 +int
79191 +gr_acl_is_capable(const int cap)
79192 +{
79193 + struct task_struct *task = current;
79194 + const struct cred *cred = current_cred();
79195 + struct acl_subject_label *curracl;
79196 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
79197 + kernel_cap_t cap_audit = __cap_empty_set;
79198 +
79199 + if (!gr_acl_is_enabled())
79200 + return 1;
79201 +
79202 + curracl = task->acl;
79203 +
79204 + cap_drop = curracl->cap_lower;
79205 + cap_mask = curracl->cap_mask;
79206 + cap_audit = curracl->cap_invert_audit;
79207 +
79208 + while ((curracl = curracl->parent_subject)) {
79209 + /* if the cap isn't specified in the current computed mask but is specified in the
79210 + current level subject, and is lowered in the current level subject, then add
79211 + it to the set of dropped capabilities
79212 + otherwise, add the current level subject's mask to the current computed mask
79213 + */
79214 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
79215 + cap_raise(cap_mask, cap);
79216 + if (cap_raised(curracl->cap_lower, cap))
79217 + cap_raise(cap_drop, cap);
79218 + if (cap_raised(curracl->cap_invert_audit, cap))
79219 + cap_raise(cap_audit, cap);
79220 + }
79221 + }
79222 +
79223 + if (!cap_raised(cap_drop, cap)) {
79224 + if (cap_raised(cap_audit, cap))
79225 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
79226 + return 1;
79227 + }
79228 +
79229 + curracl = task->acl;
79230 +
79231 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
79232 + && cap_raised(cred->cap_effective, cap)) {
79233 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
79234 + task->role->roletype, cred->uid,
79235 + cred->gid, task->exec_file ?
79236 + gr_to_filename(task->exec_file->f_path.dentry,
79237 + task->exec_file->f_path.mnt) : curracl->filename,
79238 + curracl->filename, 0UL,
79239 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
79240 + return 1;
79241 + }
79242 +
79243 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
79244 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
79245 + return 0;
79246 +}
79247 +
79248 +int
79249 +gr_acl_is_capable_nolog(const int cap)
79250 +{
79251 + struct acl_subject_label *curracl;
79252 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
79253 +
79254 + if (!gr_acl_is_enabled())
79255 + return 1;
79256 +
79257 + curracl = current->acl;
79258 +
79259 + cap_drop = curracl->cap_lower;
79260 + cap_mask = curracl->cap_mask;
79261 +
79262 + while ((curracl = curracl->parent_subject)) {
79263 + /* if the cap isn't specified in the current computed mask but is specified in the
79264 + current level subject, and is lowered in the current level subject, then add
79265 + it to the set of dropped capabilities
79266 + otherwise, add the current level subject's mask to the current computed mask
79267 + */
79268 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
79269 + cap_raise(cap_mask, cap);
79270 + if (cap_raised(curracl->cap_lower, cap))
79271 + cap_raise(cap_drop, cap);
79272 + }
79273 + }
79274 +
79275 + if (!cap_raised(cap_drop, cap))
79276 + return 1;
79277 +
79278 + return 0;
79279 +}
79280 +
79281 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
79282 new file mode 100644
79283 index 0000000..523e7e8
79284 --- /dev/null
79285 +++ b/grsecurity/gracl_fs.c
79286 @@ -0,0 +1,435 @@
79287 +#include <linux/kernel.h>
79288 +#include <linux/sched.h>
79289 +#include <linux/types.h>
79290 +#include <linux/fs.h>
79291 +#include <linux/file.h>
79292 +#include <linux/stat.h>
79293 +#include <linux/grsecurity.h>
79294 +#include <linux/grinternal.h>
79295 +#include <linux/gracl.h>
79296 +
79297 +umode_t
79298 +gr_acl_umask(void)
79299 +{
79300 + if (unlikely(!gr_acl_is_enabled()))
79301 + return 0;
79302 +
79303 + return current->role->umask;
79304 +}
79305 +
79306 +__u32
79307 +gr_acl_handle_hidden_file(const struct dentry * dentry,
79308 + const struct vfsmount * mnt)
79309 +{
79310 + __u32 mode;
79311 +
79312 + if (unlikely(!dentry->d_inode))
79313 + return GR_FIND;
79314 +
79315 + mode =
79316 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
79317 +
79318 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
79319 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
79320 + return mode;
79321 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
79322 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
79323 + return 0;
79324 + } else if (unlikely(!(mode & GR_FIND)))
79325 + return 0;
79326 +
79327 + return GR_FIND;
79328 +}
79329 +
79330 +__u32
79331 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
79332 + int acc_mode)
79333 +{
79334 + __u32 reqmode = GR_FIND;
79335 + __u32 mode;
79336 +
79337 + if (unlikely(!dentry->d_inode))
79338 + return reqmode;
79339 +
79340 + if (acc_mode & MAY_APPEND)
79341 + reqmode |= GR_APPEND;
79342 + else if (acc_mode & MAY_WRITE)
79343 + reqmode |= GR_WRITE;
79344 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
79345 + reqmode |= GR_READ;
79346 +
79347 + mode =
79348 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
79349 + mnt);
79350 +
79351 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
79352 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
79353 + reqmode & GR_READ ? " reading" : "",
79354 + reqmode & GR_WRITE ? " writing" : reqmode &
79355 + GR_APPEND ? " appending" : "");
79356 + return reqmode;
79357 + } else
79358 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
79359 + {
79360 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
79361 + reqmode & GR_READ ? " reading" : "",
79362 + reqmode & GR_WRITE ? " writing" : reqmode &
79363 + GR_APPEND ? " appending" : "");
79364 + return 0;
79365 + } else if (unlikely((mode & reqmode) != reqmode))
79366 + return 0;
79367 +
79368 + return reqmode;
79369 +}
79370 +
79371 +__u32
79372 +gr_acl_handle_creat(const struct dentry * dentry,
79373 + const struct dentry * p_dentry,
79374 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
79375 + const int imode)
79376 +{
79377 + __u32 reqmode = GR_WRITE | GR_CREATE;
79378 + __u32 mode;
79379 +
79380 + if (acc_mode & MAY_APPEND)
79381 + reqmode |= GR_APPEND;
79382 + // if a directory was required or the directory already exists, then
79383 + // don't count this open as a read
79384 + if ((acc_mode & MAY_READ) &&
79385 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
79386 + reqmode |= GR_READ;
79387 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
79388 + reqmode |= GR_SETID;
79389 +
79390 + mode =
79391 + gr_check_create(dentry, p_dentry, p_mnt,
79392 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
79393 +
79394 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
79395 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
79396 + reqmode & GR_READ ? " reading" : "",
79397 + reqmode & GR_WRITE ? " writing" : reqmode &
79398 + GR_APPEND ? " appending" : "");
79399 + return reqmode;
79400 + } else
79401 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
79402 + {
79403 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
79404 + reqmode & GR_READ ? " reading" : "",
79405 + reqmode & GR_WRITE ? " writing" : reqmode &
79406 + GR_APPEND ? " appending" : "");
79407 + return 0;
79408 + } else if (unlikely((mode & reqmode) != reqmode))
79409 + return 0;
79410 +
79411 + return reqmode;
79412 +}
79413 +
79414 +__u32
79415 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
79416 + const int fmode)
79417 +{
79418 + __u32 mode, reqmode = GR_FIND;
79419 +
79420 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
79421 + reqmode |= GR_EXEC;
79422 + if (fmode & S_IWOTH)
79423 + reqmode |= GR_WRITE;
79424 + if (fmode & S_IROTH)
79425 + reqmode |= GR_READ;
79426 +
79427 + mode =
79428 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
79429 + mnt);
79430 +
79431 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
79432 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
79433 + reqmode & GR_READ ? " reading" : "",
79434 + reqmode & GR_WRITE ? " writing" : "",
79435 + reqmode & GR_EXEC ? " executing" : "");
79436 + return reqmode;
79437 + } else
79438 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
79439 + {
79440 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
79441 + reqmode & GR_READ ? " reading" : "",
79442 + reqmode & GR_WRITE ? " writing" : "",
79443 + reqmode & GR_EXEC ? " executing" : "");
79444 + return 0;
79445 + } else if (unlikely((mode & reqmode) != reqmode))
79446 + return 0;
79447 +
79448 + return reqmode;
79449 +}
79450 +
79451 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
79452 +{
79453 + __u32 mode;
79454 +
79455 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
79456 +
79457 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
79458 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
79459 + return mode;
79460 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
79461 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
79462 + return 0;
79463 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
79464 + return 0;
79465 +
79466 + return (reqmode);
79467 +}
79468 +
79469 +__u32
79470 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
79471 +{
79472 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
79473 +}
79474 +
79475 +__u32
79476 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
79477 +{
79478 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
79479 +}
79480 +
79481 +__u32
79482 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
79483 +{
79484 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
79485 +}
79486 +
79487 +__u32
79488 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
79489 +{
79490 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
79491 +}
79492 +
79493 +__u32
79494 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
79495 + umode_t *modeptr)
79496 +{
79497 + mode_t mode;
79498 +
79499 + *modeptr &= ~(mode_t)gr_acl_umask();
79500 + mode = *modeptr;
79501 +
79502 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
79503 + return 1;
79504 +
79505 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
79506 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
79507 + GR_CHMOD_ACL_MSG);
79508 + } else {
79509 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
79510 + }
79511 +}
79512 +
79513 +__u32
79514 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
79515 +{
79516 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
79517 +}
79518 +
79519 +__u32
79520 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
79521 +{
79522 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
79523 +}
79524 +
79525 +__u32
79526 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
79527 +{
79528 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
79529 +}
79530 +
79531 +__u32
79532 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
79533 +{
79534 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
79535 + GR_UNIXCONNECT_ACL_MSG);
79536 +}
79537 +
79538 +/* hardlinks require at minimum create and link permission,
79539 + any additional privilege required is based on the
79540 + privilege of the file being linked to
79541 +*/
79542 +__u32
79543 +gr_acl_handle_link(const struct dentry * new_dentry,
79544 + const struct dentry * parent_dentry,
79545 + const struct vfsmount * parent_mnt,
79546 + const struct dentry * old_dentry,
79547 + const struct vfsmount * old_mnt, const char *to)
79548 +{
79549 + __u32 mode;
79550 + __u32 needmode = GR_CREATE | GR_LINK;
79551 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
79552 +
79553 + mode =
79554 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
79555 + old_mnt);
79556 +
79557 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
79558 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
79559 + return mode;
79560 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
79561 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
79562 + return 0;
79563 + } else if (unlikely((mode & needmode) != needmode))
79564 + return 0;
79565 +
79566 + return 1;
79567 +}
79568 +
79569 +__u32
79570 +gr_acl_handle_symlink(const struct dentry * new_dentry,
79571 + const struct dentry * parent_dentry,
79572 + const struct vfsmount * parent_mnt, const char *from)
79573 +{
79574 + __u32 needmode = GR_WRITE | GR_CREATE;
79575 + __u32 mode;
79576 +
79577 + mode =
79578 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
79579 + GR_CREATE | GR_AUDIT_CREATE |
79580 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
79581 +
79582 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
79583 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
79584 + return mode;
79585 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
79586 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
79587 + return 0;
79588 + } else if (unlikely((mode & needmode) != needmode))
79589 + return 0;
79590 +
79591 + return (GR_WRITE | GR_CREATE);
79592 +}
79593 +
79594 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
79595 +{
79596 + __u32 mode;
79597 +
79598 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
79599 +
79600 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
79601 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
79602 + return mode;
79603 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
79604 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
79605 + return 0;
79606 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
79607 + return 0;
79608 +
79609 + return (reqmode);
79610 +}
79611 +
79612 +__u32
79613 +gr_acl_handle_mknod(const struct dentry * new_dentry,
79614 + const struct dentry * parent_dentry,
79615 + const struct vfsmount * parent_mnt,
79616 + const int mode)
79617 +{
79618 + __u32 reqmode = GR_WRITE | GR_CREATE;
79619 + if (unlikely(mode & (S_ISUID | S_ISGID)))
79620 + reqmode |= GR_SETID;
79621 +
79622 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
79623 + reqmode, GR_MKNOD_ACL_MSG);
79624 +}
79625 +
79626 +__u32
79627 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
79628 + const struct dentry *parent_dentry,
79629 + const struct vfsmount *parent_mnt)
79630 +{
79631 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
79632 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
79633 +}
79634 +
79635 +#define RENAME_CHECK_SUCCESS(old, new) \
79636 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
79637 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
79638 +
79639 +int
79640 +gr_acl_handle_rename(struct dentry *new_dentry,
79641 + struct dentry *parent_dentry,
79642 + const struct vfsmount *parent_mnt,
79643 + struct dentry *old_dentry,
79644 + struct inode *old_parent_inode,
79645 + struct vfsmount *old_mnt, const char *newname)
79646 +{
79647 + __u32 comp1, comp2;
79648 + int error = 0;
79649 +
79650 + if (unlikely(!gr_acl_is_enabled()))
79651 + return 0;
79652 +
79653 + if (!new_dentry->d_inode) {
79654 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
79655 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
79656 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
79657 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
79658 + GR_DELETE | GR_AUDIT_DELETE |
79659 + GR_AUDIT_READ | GR_AUDIT_WRITE |
79660 + GR_SUPPRESS, old_mnt);
79661 + } else {
79662 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
79663 + GR_CREATE | GR_DELETE |
79664 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
79665 + GR_AUDIT_READ | GR_AUDIT_WRITE |
79666 + GR_SUPPRESS, parent_mnt);
79667 + comp2 =
79668 + gr_search_file(old_dentry,
79669 + GR_READ | GR_WRITE | GR_AUDIT_READ |
79670 + GR_DELETE | GR_AUDIT_DELETE |
79671 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
79672 + }
79673 +
79674 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
79675 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
79676 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
79677 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
79678 + && !(comp2 & GR_SUPPRESS)) {
79679 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
79680 + error = -EACCES;
79681 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
79682 + error = -EACCES;
79683 +
79684 + return error;
79685 +}
79686 +
79687 +void
79688 +gr_acl_handle_exit(void)
79689 +{
79690 + u16 id;
79691 + char *rolename;
79692 + struct file *exec_file;
79693 +
79694 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
79695 + !(current->role->roletype & GR_ROLE_PERSIST))) {
79696 + id = current->acl_role_id;
79697 + rolename = current->role->rolename;
79698 + gr_set_acls(1);
79699 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
79700 + }
79701 +
79702 + write_lock(&grsec_exec_file_lock);
79703 + exec_file = current->exec_file;
79704 + current->exec_file = NULL;
79705 + write_unlock(&grsec_exec_file_lock);
79706 +
79707 + if (exec_file)
79708 + fput(exec_file);
79709 +}
79710 +
79711 +int
79712 +gr_acl_handle_procpidmem(const struct task_struct *task)
79713 +{
79714 + if (unlikely(!gr_acl_is_enabled()))
79715 + return 0;
79716 +
79717 + if (task != current && task->acl->mode & GR_PROTPROCFD)
79718 + return -EACCES;
79719 +
79720 + return 0;
79721 +}
79722 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
79723 new file mode 100644
79724 index 0000000..cd07b96
79725 --- /dev/null
79726 +++ b/grsecurity/gracl_ip.c
79727 @@ -0,0 +1,382 @@
79728 +#include <linux/kernel.h>
79729 +#include <asm/uaccess.h>
79730 +#include <asm/errno.h>
79731 +#include <net/sock.h>
79732 +#include <linux/file.h>
79733 +#include <linux/fs.h>
79734 +#include <linux/net.h>
79735 +#include <linux/in.h>
79736 +#include <linux/skbuff.h>
79737 +#include <linux/ip.h>
79738 +#include <linux/udp.h>
79739 +#include <linux/smp_lock.h>
79740 +#include <linux/types.h>
79741 +#include <linux/sched.h>
79742 +#include <linux/netdevice.h>
79743 +#include <linux/inetdevice.h>
79744 +#include <linux/gracl.h>
79745 +#include <linux/grsecurity.h>
79746 +#include <linux/grinternal.h>
79747 +
79748 +#define GR_BIND 0x01
79749 +#define GR_CONNECT 0x02
79750 +#define GR_INVERT 0x04
79751 +#define GR_BINDOVERRIDE 0x08
79752 +#define GR_CONNECTOVERRIDE 0x10
79753 +#define GR_SOCK_FAMILY 0x20
79754 +
79755 +static const char * gr_protocols[IPPROTO_MAX] = {
79756 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
79757 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
79758 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
79759 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
79760 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
79761 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
79762 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
79763 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
79764 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
79765 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
79766 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
79767 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
79768 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
79769 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
79770 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
79771 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
79772 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
79773 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
79774 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
79775 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
79776 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
79777 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
79778 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
79779 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
79780 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
79781 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
79782 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
79783 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
79784 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
79785 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
79786 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
79787 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
79788 + };
79789 +
79790 +static const char * gr_socktypes[SOCK_MAX] = {
79791 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
79792 + "unknown:7", "unknown:8", "unknown:9", "packet"
79793 + };
79794 +
79795 +static const char * gr_sockfamilies[AF_MAX+1] = {
79796 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
79797 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
79798 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
79799 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
79800 + };
79801 +
79802 +const char *
79803 +gr_proto_to_name(unsigned char proto)
79804 +{
79805 + return gr_protocols[proto];
79806 +}
79807 +
79808 +const char *
79809 +gr_socktype_to_name(unsigned char type)
79810 +{
79811 + return gr_socktypes[type];
79812 +}
79813 +
79814 +const char *
79815 +gr_sockfamily_to_name(unsigned char family)
79816 +{
79817 + return gr_sockfamilies[family];
79818 +}
79819 +
79820 +int
79821 +gr_search_socket(const int domain, const int type, const int protocol)
79822 +{
79823 + struct acl_subject_label *curr;
79824 + const struct cred *cred = current_cred();
79825 +
79826 + if (unlikely(!gr_acl_is_enabled()))
79827 + goto exit;
79828 +
79829 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
79830 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
79831 + goto exit; // let the kernel handle it
79832 +
79833 + curr = current->acl;
79834 +
79835 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
79836 + /* the family is allowed, if this is PF_INET allow it only if
79837 + the extra sock type/protocol checks pass */
79838 + if (domain == PF_INET)
79839 + goto inet_check;
79840 + goto exit;
79841 + } else {
79842 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
79843 + __u32 fakeip = 0;
79844 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79845 + current->role->roletype, cred->uid,
79846 + cred->gid, current->exec_file ?
79847 + gr_to_filename(current->exec_file->f_path.dentry,
79848 + current->exec_file->f_path.mnt) :
79849 + curr->filename, curr->filename,
79850 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
79851 + &current->signal->saved_ip);
79852 + goto exit;
79853 + }
79854 + goto exit_fail;
79855 + }
79856 +
79857 +inet_check:
79858 + /* the rest of this checking is for IPv4 only */
79859 + if (!curr->ips)
79860 + goto exit;
79861 +
79862 + if ((curr->ip_type & (1 << type)) &&
79863 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
79864 + goto exit;
79865 +
79866 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
79867 + /* we don't place acls on raw sockets , and sometimes
79868 + dgram/ip sockets are opened for ioctl and not
79869 + bind/connect, so we'll fake a bind learn log */
79870 + if (type == SOCK_RAW || type == SOCK_PACKET) {
79871 + __u32 fakeip = 0;
79872 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79873 + current->role->roletype, cred->uid,
79874 + cred->gid, current->exec_file ?
79875 + gr_to_filename(current->exec_file->f_path.dentry,
79876 + current->exec_file->f_path.mnt) :
79877 + curr->filename, curr->filename,
79878 + &fakeip, 0, type,
79879 + protocol, GR_CONNECT, &current->signal->saved_ip);
79880 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
79881 + __u32 fakeip = 0;
79882 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79883 + current->role->roletype, cred->uid,
79884 + cred->gid, current->exec_file ?
79885 + gr_to_filename(current->exec_file->f_path.dentry,
79886 + current->exec_file->f_path.mnt) :
79887 + curr->filename, curr->filename,
79888 + &fakeip, 0, type,
79889 + protocol, GR_BIND, &current->signal->saved_ip);
79890 + }
79891 + /* we'll log when they use connect or bind */
79892 + goto exit;
79893 + }
79894 +
79895 +exit_fail:
79896 + if (domain == PF_INET)
79897 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
79898 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
79899 + else
79900 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
79901 + gr_socktype_to_name(type), protocol);
79902 +
79903 + return 0;
79904 +exit:
79905 + return 1;
79906 +}
79907 +
79908 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
79909 +{
79910 + if ((ip->mode & mode) &&
79911 + (ip_port >= ip->low) &&
79912 + (ip_port <= ip->high) &&
79913 + ((ntohl(ip_addr) & our_netmask) ==
79914 + (ntohl(our_addr) & our_netmask))
79915 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
79916 + && (ip->type & (1 << type))) {
79917 + if (ip->mode & GR_INVERT)
79918 + return 2; // specifically denied
79919 + else
79920 + return 1; // allowed
79921 + }
79922 +
79923 + return 0; // not specifically allowed, may continue parsing
79924 +}
79925 +
79926 +static int
79927 +gr_search_connectbind(const int full_mode, struct sock *sk,
79928 + struct sockaddr_in *addr, const int type)
79929 +{
79930 + char iface[IFNAMSIZ] = {0};
79931 + struct acl_subject_label *curr;
79932 + struct acl_ip_label *ip;
79933 + struct inet_sock *isk;
79934 + struct net_device *dev;
79935 + struct in_device *idev;
79936 + unsigned long i;
79937 + int ret;
79938 + int mode = full_mode & (GR_BIND | GR_CONNECT);
79939 + __u32 ip_addr = 0;
79940 + __u32 our_addr;
79941 + __u32 our_netmask;
79942 + char *p;
79943 + __u16 ip_port = 0;
79944 + const struct cred *cred = current_cred();
79945 +
79946 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
79947 + return 0;
79948 +
79949 + curr = current->acl;
79950 + isk = inet_sk(sk);
79951 +
79952 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
79953 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
79954 + addr->sin_addr.s_addr = curr->inaddr_any_override;
79955 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
79956 + struct sockaddr_in saddr;
79957 + int err;
79958 +
79959 + saddr.sin_family = AF_INET;
79960 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
79961 + saddr.sin_port = isk->sport;
79962 +
79963 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
79964 + if (err)
79965 + return err;
79966 +
79967 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
79968 + if (err)
79969 + return err;
79970 + }
79971 +
79972 + if (!curr->ips)
79973 + return 0;
79974 +
79975 + ip_addr = addr->sin_addr.s_addr;
79976 + ip_port = ntohs(addr->sin_port);
79977 +
79978 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
79979 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79980 + current->role->roletype, cred->uid,
79981 + cred->gid, current->exec_file ?
79982 + gr_to_filename(current->exec_file->f_path.dentry,
79983 + current->exec_file->f_path.mnt) :
79984 + curr->filename, curr->filename,
79985 + &ip_addr, ip_port, type,
79986 + sk->sk_protocol, mode, &current->signal->saved_ip);
79987 + return 0;
79988 + }
79989 +
79990 + for (i = 0; i < curr->ip_num; i++) {
79991 + ip = *(curr->ips + i);
79992 + if (ip->iface != NULL) {
79993 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
79994 + p = strchr(iface, ':');
79995 + if (p != NULL)
79996 + *p = '\0';
79997 + dev = dev_get_by_name(sock_net(sk), iface);
79998 + if (dev == NULL)
79999 + continue;
80000 + idev = in_dev_get(dev);
80001 + if (idev == NULL) {
80002 + dev_put(dev);
80003 + continue;
80004 + }
80005 + rcu_read_lock();
80006 + for_ifa(idev) {
80007 + if (!strcmp(ip->iface, ifa->ifa_label)) {
80008 + our_addr = ifa->ifa_address;
80009 + our_netmask = 0xffffffff;
80010 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
80011 + if (ret == 1) {
80012 + rcu_read_unlock();
80013 + in_dev_put(idev);
80014 + dev_put(dev);
80015 + return 0;
80016 + } else if (ret == 2) {
80017 + rcu_read_unlock();
80018 + in_dev_put(idev);
80019 + dev_put(dev);
80020 + goto denied;
80021 + }
80022 + }
80023 + } endfor_ifa(idev);
80024 + rcu_read_unlock();
80025 + in_dev_put(idev);
80026 + dev_put(dev);
80027 + } else {
80028 + our_addr = ip->addr;
80029 + our_netmask = ip->netmask;
80030 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
80031 + if (ret == 1)
80032 + return 0;
80033 + else if (ret == 2)
80034 + goto denied;
80035 + }
80036 + }
80037 +
80038 +denied:
80039 + if (mode == GR_BIND)
80040 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
80041 + else if (mode == GR_CONNECT)
80042 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
80043 +
80044 + return -EACCES;
80045 +}
80046 +
80047 +int
80048 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
80049 +{
80050 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
80051 +}
80052 +
80053 +int
80054 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
80055 +{
80056 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
80057 +}
80058 +
80059 +int gr_search_listen(struct socket *sock)
80060 +{
80061 + struct sock *sk = sock->sk;
80062 + struct sockaddr_in addr;
80063 +
80064 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
80065 + addr.sin_port = inet_sk(sk)->sport;
80066 +
80067 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
80068 +}
80069 +
80070 +int gr_search_accept(struct socket *sock)
80071 +{
80072 + struct sock *sk = sock->sk;
80073 + struct sockaddr_in addr;
80074 +
80075 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
80076 + addr.sin_port = inet_sk(sk)->sport;
80077 +
80078 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
80079 +}
80080 +
80081 +int
80082 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
80083 +{
80084 + if (addr)
80085 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
80086 + else {
80087 + struct sockaddr_in sin;
80088 + const struct inet_sock *inet = inet_sk(sk);
80089 +
80090 + sin.sin_addr.s_addr = inet->daddr;
80091 + sin.sin_port = inet->dport;
80092 +
80093 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
80094 + }
80095 +}
80096 +
80097 +int
80098 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
80099 +{
80100 + struct sockaddr_in sin;
80101 +
80102 + if (unlikely(skb->len < sizeof (struct udphdr)))
80103 + return 0; // skip this packet
80104 +
80105 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
80106 + sin.sin_port = udp_hdr(skb)->source;
80107 +
80108 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
80109 +}
80110 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
80111 new file mode 100644
80112 index 0000000..34bdd46
80113 --- /dev/null
80114 +++ b/grsecurity/gracl_learn.c
80115 @@ -0,0 +1,208 @@
80116 +#include <linux/kernel.h>
80117 +#include <linux/mm.h>
80118 +#include <linux/sched.h>
80119 +#include <linux/poll.h>
80120 +#include <linux/smp_lock.h>
80121 +#include <linux/string.h>
80122 +#include <linux/file.h>
80123 +#include <linux/types.h>
80124 +#include <linux/vmalloc.h>
80125 +#include <linux/grinternal.h>
80126 +
80127 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
80128 + size_t count, loff_t *ppos);
80129 +extern int gr_acl_is_enabled(void);
80130 +
80131 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
80132 +static int gr_learn_attached;
80133 +
80134 +/* use a 512k buffer */
80135 +#define LEARN_BUFFER_SIZE (512 * 1024)
80136 +
80137 +static DEFINE_SPINLOCK(gr_learn_lock);
80138 +static DEFINE_MUTEX(gr_learn_user_mutex);
80139 +
80140 +/* we need to maintain two buffers, so that the kernel context of grlearn
80141 + uses a semaphore around the userspace copying, and the other kernel contexts
80142 + use a spinlock when copying into the buffer, since they cannot sleep
80143 +*/
80144 +static char *learn_buffer;
80145 +static char *learn_buffer_user;
80146 +static int learn_buffer_len;
80147 +static int learn_buffer_user_len;
80148 +
80149 +static ssize_t
80150 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
80151 +{
80152 + DECLARE_WAITQUEUE(wait, current);
80153 + ssize_t retval = 0;
80154 +
80155 + add_wait_queue(&learn_wait, &wait);
80156 + set_current_state(TASK_INTERRUPTIBLE);
80157 + do {
80158 + mutex_lock(&gr_learn_user_mutex);
80159 + spin_lock(&gr_learn_lock);
80160 + if (learn_buffer_len)
80161 + break;
80162 + spin_unlock(&gr_learn_lock);
80163 + mutex_unlock(&gr_learn_user_mutex);
80164 + if (file->f_flags & O_NONBLOCK) {
80165 + retval = -EAGAIN;
80166 + goto out;
80167 + }
80168 + if (signal_pending(current)) {
80169 + retval = -ERESTARTSYS;
80170 + goto out;
80171 + }
80172 +
80173 + schedule();
80174 + } while (1);
80175 +
80176 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
80177 + learn_buffer_user_len = learn_buffer_len;
80178 + retval = learn_buffer_len;
80179 + learn_buffer_len = 0;
80180 +
80181 + spin_unlock(&gr_learn_lock);
80182 +
80183 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
80184 + retval = -EFAULT;
80185 +
80186 + mutex_unlock(&gr_learn_user_mutex);
80187 +out:
80188 + set_current_state(TASK_RUNNING);
80189 + remove_wait_queue(&learn_wait, &wait);
80190 + return retval;
80191 +}
80192 +
80193 +static unsigned int
80194 +poll_learn(struct file * file, poll_table * wait)
80195 +{
80196 + poll_wait(file, &learn_wait, wait);
80197 +
80198 + if (learn_buffer_len)
80199 + return (POLLIN | POLLRDNORM);
80200 +
80201 + return 0;
80202 +}
80203 +
80204 +void
80205 +gr_clear_learn_entries(void)
80206 +{
80207 + char *tmp;
80208 +
80209 + mutex_lock(&gr_learn_user_mutex);
80210 + spin_lock(&gr_learn_lock);
80211 + tmp = learn_buffer;
80212 + learn_buffer = NULL;
80213 + spin_unlock(&gr_learn_lock);
80214 + if (tmp)
80215 + vfree(tmp);
80216 + if (learn_buffer_user != NULL) {
80217 + vfree(learn_buffer_user);
80218 + learn_buffer_user = NULL;
80219 + }
80220 + learn_buffer_len = 0;
80221 + mutex_unlock(&gr_learn_user_mutex);
80222 +
80223 + return;
80224 +}
80225 +
80226 +void
80227 +gr_add_learn_entry(const char *fmt, ...)
80228 +{
80229 + va_list args;
80230 + unsigned int len;
80231 +
80232 + if (!gr_learn_attached)
80233 + return;
80234 +
80235 + spin_lock(&gr_learn_lock);
80236 +
80237 + /* leave a gap at the end so we know when it's "full" but don't have to
80238 + compute the exact length of the string we're trying to append
80239 + */
80240 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
80241 + spin_unlock(&gr_learn_lock);
80242 + wake_up_interruptible(&learn_wait);
80243 + return;
80244 + }
80245 + if (learn_buffer == NULL) {
80246 + spin_unlock(&gr_learn_lock);
80247 + return;
80248 + }
80249 +
80250 + va_start(args, fmt);
80251 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
80252 + va_end(args);
80253 +
80254 + learn_buffer_len += len + 1;
80255 +
80256 + spin_unlock(&gr_learn_lock);
80257 + wake_up_interruptible(&learn_wait);
80258 +
80259 + return;
80260 +}
80261 +
80262 +static int
80263 +open_learn(struct inode *inode, struct file *file)
80264 +{
80265 + if (file->f_mode & FMODE_READ && gr_learn_attached)
80266 + return -EBUSY;
80267 + if (file->f_mode & FMODE_READ) {
80268 + int retval = 0;
80269 + mutex_lock(&gr_learn_user_mutex);
80270 + if (learn_buffer == NULL)
80271 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
80272 + if (learn_buffer_user == NULL)
80273 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
80274 + if (learn_buffer == NULL) {
80275 + retval = -ENOMEM;
80276 + goto out_error;
80277 + }
80278 + if (learn_buffer_user == NULL) {
80279 + retval = -ENOMEM;
80280 + goto out_error;
80281 + }
80282 + learn_buffer_len = 0;
80283 + learn_buffer_user_len = 0;
80284 + gr_learn_attached = 1;
80285 +out_error:
80286 + mutex_unlock(&gr_learn_user_mutex);
80287 + return retval;
80288 + }
80289 + return 0;
80290 +}
80291 +
80292 +static int
80293 +close_learn(struct inode *inode, struct file *file)
80294 +{
80295 + if (file->f_mode & FMODE_READ) {
80296 + char *tmp = NULL;
80297 + mutex_lock(&gr_learn_user_mutex);
80298 + spin_lock(&gr_learn_lock);
80299 + tmp = learn_buffer;
80300 + learn_buffer = NULL;
80301 + spin_unlock(&gr_learn_lock);
80302 + if (tmp)
80303 + vfree(tmp);
80304 + if (learn_buffer_user != NULL) {
80305 + vfree(learn_buffer_user);
80306 + learn_buffer_user = NULL;
80307 + }
80308 + learn_buffer_len = 0;
80309 + learn_buffer_user_len = 0;
80310 + gr_learn_attached = 0;
80311 + mutex_unlock(&gr_learn_user_mutex);
80312 + }
80313 +
80314 + return 0;
80315 +}
80316 +
80317 +const struct file_operations grsec_fops = {
80318 + .read = read_learn,
80319 + .write = write_grsec_handler,
80320 + .open = open_learn,
80321 + .release = close_learn,
80322 + .poll = poll_learn,
80323 +};
80324 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
80325 new file mode 100644
80326 index 0000000..70b2179
80327 --- /dev/null
80328 +++ b/grsecurity/gracl_res.c
80329 @@ -0,0 +1,67 @@
80330 +#include <linux/kernel.h>
80331 +#include <linux/sched.h>
80332 +#include <linux/gracl.h>
80333 +#include <linux/grinternal.h>
80334 +
80335 +static const char *restab_log[] = {
80336 + [RLIMIT_CPU] = "RLIMIT_CPU",
80337 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
80338 + [RLIMIT_DATA] = "RLIMIT_DATA",
80339 + [RLIMIT_STACK] = "RLIMIT_STACK",
80340 + [RLIMIT_CORE] = "RLIMIT_CORE",
80341 + [RLIMIT_RSS] = "RLIMIT_RSS",
80342 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
80343 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
80344 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
80345 + [RLIMIT_AS] = "RLIMIT_AS",
80346 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
80347 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
80348 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
80349 + [RLIMIT_NICE] = "RLIMIT_NICE",
80350 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
80351 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
80352 + [GR_CRASH_RES] = "RLIMIT_CRASH"
80353 +};
80354 +
80355 +void
80356 +gr_log_resource(const struct task_struct *task,
80357 + const int res, const unsigned long wanted, const int gt)
80358 +{
80359 + const struct cred *cred;
80360 + unsigned long rlim;
80361 +
80362 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
80363 + return;
80364 +
80365 + // not yet supported resource
80366 + if (unlikely(!restab_log[res]))
80367 + return;
80368 +
80369 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
80370 + rlim = task->signal->rlim[res].rlim_max;
80371 + else
80372 + rlim = task->signal->rlim[res].rlim_cur;
80373 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
80374 + return;
80375 +
80376 + rcu_read_lock();
80377 + cred = __task_cred(task);
80378 +
80379 + if (res == RLIMIT_NPROC &&
80380 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
80381 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
80382 + goto out_rcu_unlock;
80383 + else if (res == RLIMIT_MEMLOCK &&
80384 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
80385 + goto out_rcu_unlock;
80386 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
80387 + goto out_rcu_unlock;
80388 + rcu_read_unlock();
80389 +
80390 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
80391 +
80392 + return;
80393 +out_rcu_unlock:
80394 + rcu_read_unlock();
80395 + return;
80396 +}
80397 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
80398 new file mode 100644
80399 index 0000000..1d1b734
80400 --- /dev/null
80401 +++ b/grsecurity/gracl_segv.c
80402 @@ -0,0 +1,284 @@
80403 +#include <linux/kernel.h>
80404 +#include <linux/mm.h>
80405 +#include <asm/uaccess.h>
80406 +#include <asm/errno.h>
80407 +#include <asm/mman.h>
80408 +#include <net/sock.h>
80409 +#include <linux/file.h>
80410 +#include <linux/fs.h>
80411 +#include <linux/net.h>
80412 +#include <linux/in.h>
80413 +#include <linux/smp_lock.h>
80414 +#include <linux/slab.h>
80415 +#include <linux/types.h>
80416 +#include <linux/sched.h>
80417 +#include <linux/timer.h>
80418 +#include <linux/gracl.h>
80419 +#include <linux/grsecurity.h>
80420 +#include <linux/grinternal.h>
80421 +
80422 +static struct crash_uid *uid_set;
80423 +static unsigned short uid_used;
80424 +static DEFINE_SPINLOCK(gr_uid_lock);
80425 +extern rwlock_t gr_inode_lock;
80426 +extern struct acl_subject_label *
80427 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
80428 + struct acl_role_label *role);
80429 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
80430 +
80431 +int
80432 +gr_init_uidset(void)
80433 +{
80434 + uid_set =
80435 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
80436 + uid_used = 0;
80437 +
80438 + return uid_set ? 1 : 0;
80439 +}
80440 +
80441 +void
80442 +gr_free_uidset(void)
80443 +{
80444 + if (uid_set)
80445 + kfree(uid_set);
80446 +
80447 + return;
80448 +}
80449 +
80450 +int
80451 +gr_find_uid(const uid_t uid)
80452 +{
80453 + struct crash_uid *tmp = uid_set;
80454 + uid_t buid;
80455 + int low = 0, high = uid_used - 1, mid;
80456 +
80457 + while (high >= low) {
80458 + mid = (low + high) >> 1;
80459 + buid = tmp[mid].uid;
80460 + if (buid == uid)
80461 + return mid;
80462 + if (buid > uid)
80463 + high = mid - 1;
80464 + if (buid < uid)
80465 + low = mid + 1;
80466 + }
80467 +
80468 + return -1;
80469 +}
80470 +
80471 +static __inline__ void
80472 +gr_insertsort(void)
80473 +{
80474 + unsigned short i, j;
80475 + struct crash_uid index;
80476 +
80477 + for (i = 1; i < uid_used; i++) {
80478 + index = uid_set[i];
80479 + j = i;
80480 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
80481 + uid_set[j] = uid_set[j - 1];
80482 + j--;
80483 + }
80484 + uid_set[j] = index;
80485 + }
80486 +
80487 + return;
80488 +}
80489 +
80490 +static __inline__ void
80491 +gr_insert_uid(const uid_t uid, const unsigned long expires)
80492 +{
80493 + int loc;
80494 +
80495 + if (uid_used == GR_UIDTABLE_MAX)
80496 + return;
80497 +
80498 + loc = gr_find_uid(uid);
80499 +
80500 + if (loc >= 0) {
80501 + uid_set[loc].expires = expires;
80502 + return;
80503 + }
80504 +
80505 + uid_set[uid_used].uid = uid;
80506 + uid_set[uid_used].expires = expires;
80507 + uid_used++;
80508 +
80509 + gr_insertsort();
80510 +
80511 + return;
80512 +}
80513 +
80514 +void
80515 +gr_remove_uid(const unsigned short loc)
80516 +{
80517 + unsigned short i;
80518 +
80519 + for (i = loc + 1; i < uid_used; i++)
80520 + uid_set[i - 1] = uid_set[i];
80521 +
80522 + uid_used--;
80523 +
80524 + return;
80525 +}
80526 +
80527 +int
80528 +gr_check_crash_uid(const uid_t uid)
80529 +{
80530 + int loc;
80531 + int ret = 0;
80532 +
80533 + if (unlikely(!gr_acl_is_enabled()))
80534 + return 0;
80535 +
80536 + spin_lock(&gr_uid_lock);
80537 + loc = gr_find_uid(uid);
80538 +
80539 + if (loc < 0)
80540 + goto out_unlock;
80541 +
80542 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
80543 + gr_remove_uid(loc);
80544 + else
80545 + ret = 1;
80546 +
80547 +out_unlock:
80548 + spin_unlock(&gr_uid_lock);
80549 + return ret;
80550 +}
80551 +
80552 +static __inline__ int
80553 +proc_is_setxid(const struct cred *cred)
80554 +{
80555 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
80556 + cred->uid != cred->fsuid)
80557 + return 1;
80558 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
80559 + cred->gid != cred->fsgid)
80560 + return 1;
80561 +
80562 + return 0;
80563 +}
80564 +
80565 +void
80566 +gr_handle_crash(struct task_struct *task, const int sig)
80567 +{
80568 + struct acl_subject_label *curr;
80569 + struct task_struct *tsk, *tsk2;
80570 + const struct cred *cred;
80571 + const struct cred *cred2;
80572 +
80573 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
80574 + return;
80575 +
80576 + if (unlikely(!gr_acl_is_enabled()))
80577 + return;
80578 +
80579 + curr = task->acl;
80580 +
80581 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
80582 + return;
80583 +
80584 + if (time_before_eq(curr->expires, get_seconds())) {
80585 + curr->expires = 0;
80586 + curr->crashes = 0;
80587 + }
80588 +
80589 + curr->crashes++;
80590 +
80591 + if (!curr->expires)
80592 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
80593 +
80594 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
80595 + time_after(curr->expires, get_seconds())) {
80596 + rcu_read_lock();
80597 + cred = __task_cred(task);
80598 + if (cred->uid && proc_is_setxid(cred)) {
80599 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
80600 + spin_lock(&gr_uid_lock);
80601 + gr_insert_uid(cred->uid, curr->expires);
80602 + spin_unlock(&gr_uid_lock);
80603 + curr->expires = 0;
80604 + curr->crashes = 0;
80605 + read_lock(&tasklist_lock);
80606 + do_each_thread(tsk2, tsk) {
80607 + cred2 = __task_cred(tsk);
80608 + if (tsk != task && cred2->uid == cred->uid)
80609 + gr_fake_force_sig(SIGKILL, tsk);
80610 + } while_each_thread(tsk2, tsk);
80611 + read_unlock(&tasklist_lock);
80612 + } else {
80613 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
80614 + read_lock(&tasklist_lock);
80615 + read_lock(&grsec_exec_file_lock);
80616 + do_each_thread(tsk2, tsk) {
80617 + if (likely(tsk != task)) {
80618 + // if this thread has the same subject as the one that triggered
80619 + // RES_CRASH and it's the same binary, kill it
80620 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
80621 + gr_fake_force_sig(SIGKILL, tsk);
80622 + }
80623 + } while_each_thread(tsk2, tsk);
80624 + read_unlock(&grsec_exec_file_lock);
80625 + read_unlock(&tasklist_lock);
80626 + }
80627 + rcu_read_unlock();
80628 + }
80629 +
80630 + return;
80631 +}
80632 +
80633 +int
80634 +gr_check_crash_exec(const struct file *filp)
80635 +{
80636 + struct acl_subject_label *curr;
80637 +
80638 + if (unlikely(!gr_acl_is_enabled()))
80639 + return 0;
80640 +
80641 + read_lock(&gr_inode_lock);
80642 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
80643 + filp->f_path.dentry->d_inode->i_sb->s_dev,
80644 + current->role);
80645 + read_unlock(&gr_inode_lock);
80646 +
80647 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
80648 + (!curr->crashes && !curr->expires))
80649 + return 0;
80650 +
80651 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
80652 + time_after(curr->expires, get_seconds()))
80653 + return 1;
80654 + else if (time_before_eq(curr->expires, get_seconds())) {
80655 + curr->crashes = 0;
80656 + curr->expires = 0;
80657 + }
80658 +
80659 + return 0;
80660 +}
80661 +
80662 +void
80663 +gr_handle_alertkill(struct task_struct *task)
80664 +{
80665 + struct acl_subject_label *curracl;
80666 + __u32 curr_ip;
80667 + struct task_struct *p, *p2;
80668 +
80669 + if (unlikely(!gr_acl_is_enabled()))
80670 + return;
80671 +
80672 + curracl = task->acl;
80673 + curr_ip = task->signal->curr_ip;
80674 +
80675 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
80676 + read_lock(&tasklist_lock);
80677 + do_each_thread(p2, p) {
80678 + if (p->signal->curr_ip == curr_ip)
80679 + gr_fake_force_sig(SIGKILL, p);
80680 + } while_each_thread(p2, p);
80681 + read_unlock(&tasklist_lock);
80682 + } else if (curracl->mode & GR_KILLPROC)
80683 + gr_fake_force_sig(SIGKILL, task);
80684 +
80685 + return;
80686 +}
80687 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
80688 new file mode 100644
80689 index 0000000..9d83a69
80690 --- /dev/null
80691 +++ b/grsecurity/gracl_shm.c
80692 @@ -0,0 +1,40 @@
80693 +#include <linux/kernel.h>
80694 +#include <linux/mm.h>
80695 +#include <linux/sched.h>
80696 +#include <linux/file.h>
80697 +#include <linux/ipc.h>
80698 +#include <linux/gracl.h>
80699 +#include <linux/grsecurity.h>
80700 +#include <linux/grinternal.h>
80701 +
80702 +int
80703 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
80704 + const time_t shm_createtime, const uid_t cuid, const int shmid)
80705 +{
80706 + struct task_struct *task;
80707 +
80708 + if (!gr_acl_is_enabled())
80709 + return 1;
80710 +
80711 + rcu_read_lock();
80712 + read_lock(&tasklist_lock);
80713 +
80714 + task = find_task_by_vpid(shm_cprid);
80715 +
80716 + if (unlikely(!task))
80717 + task = find_task_by_vpid(shm_lapid);
80718 +
80719 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
80720 + (task->pid == shm_lapid)) &&
80721 + (task->acl->mode & GR_PROTSHM) &&
80722 + (task->acl != current->acl))) {
80723 + read_unlock(&tasklist_lock);
80724 + rcu_read_unlock();
80725 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
80726 + return 0;
80727 + }
80728 + read_unlock(&tasklist_lock);
80729 + rcu_read_unlock();
80730 +
80731 + return 1;
80732 +}
80733 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
80734 new file mode 100644
80735 index 0000000..bc0be01
80736 --- /dev/null
80737 +++ b/grsecurity/grsec_chdir.c
80738 @@ -0,0 +1,19 @@
80739 +#include <linux/kernel.h>
80740 +#include <linux/sched.h>
80741 +#include <linux/fs.h>
80742 +#include <linux/file.h>
80743 +#include <linux/grsecurity.h>
80744 +#include <linux/grinternal.h>
80745 +
80746 +void
80747 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
80748 +{
80749 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
80750 + if ((grsec_enable_chdir && grsec_enable_group &&
80751 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
80752 + !grsec_enable_group)) {
80753 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
80754 + }
80755 +#endif
80756 + return;
80757 +}
80758 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
80759 new file mode 100644
80760 index 0000000..197bdd5
80761 --- /dev/null
80762 +++ b/grsecurity/grsec_chroot.c
80763 @@ -0,0 +1,386 @@
80764 +#include <linux/kernel.h>
80765 +#include <linux/module.h>
80766 +#include <linux/sched.h>
80767 +#include <linux/file.h>
80768 +#include <linux/fs.h>
80769 +#include <linux/mount.h>
80770 +#include <linux/types.h>
80771 +#include <linux/pid_namespace.h>
80772 +#include <linux/grsecurity.h>
80773 +#include <linux/grinternal.h>
80774 +
80775 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
80776 +{
80777 +#ifdef CONFIG_GRKERNSEC
80778 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
80779 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
80780 + task->gr_is_chrooted = 1;
80781 + else
80782 + task->gr_is_chrooted = 0;
80783 +
80784 + task->gr_chroot_dentry = path->dentry;
80785 +#endif
80786 + return;
80787 +}
80788 +
80789 +void gr_clear_chroot_entries(struct task_struct *task)
80790 +{
80791 +#ifdef CONFIG_GRKERNSEC
80792 + task->gr_is_chrooted = 0;
80793 + task->gr_chroot_dentry = NULL;
80794 +#endif
80795 + return;
80796 +}
80797 +
80798 +int
80799 +gr_handle_chroot_unix(const pid_t pid)
80800 +{
80801 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
80802 + struct task_struct *p;
80803 +
80804 + if (unlikely(!grsec_enable_chroot_unix))
80805 + return 1;
80806 +
80807 + if (likely(!proc_is_chrooted(current)))
80808 + return 1;
80809 +
80810 + rcu_read_lock();
80811 + read_lock(&tasklist_lock);
80812 +
80813 + p = find_task_by_vpid_unrestricted(pid);
80814 + if (unlikely(p && !have_same_root(current, p))) {
80815 + read_unlock(&tasklist_lock);
80816 + rcu_read_unlock();
80817 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
80818 + return 0;
80819 + }
80820 + read_unlock(&tasklist_lock);
80821 + rcu_read_unlock();
80822 +#endif
80823 + return 1;
80824 +}
80825 +
80826 +int
80827 +gr_handle_chroot_nice(void)
80828 +{
80829 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
80830 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
80831 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
80832 + return -EPERM;
80833 + }
80834 +#endif
80835 + return 0;
80836 +}
80837 +
80838 +int
80839 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
80840 +{
80841 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
80842 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
80843 + && proc_is_chrooted(current)) {
80844 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
80845 + return -EACCES;
80846 + }
80847 +#endif
80848 + return 0;
80849 +}
80850 +
80851 +int
80852 +gr_handle_chroot_rawio(const struct inode *inode)
80853 +{
80854 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
80855 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
80856 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
80857 + return 1;
80858 +#endif
80859 + return 0;
80860 +}
80861 +
80862 +int
80863 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
80864 +{
80865 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
80866 + struct task_struct *p;
80867 + int ret = 0;
80868 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
80869 + return ret;
80870 +
80871 + read_lock(&tasklist_lock);
80872 + do_each_pid_task(pid, type, p) {
80873 + if (!have_same_root(current, p)) {
80874 + ret = 1;
80875 + goto out;
80876 + }
80877 + } while_each_pid_task(pid, type, p);
80878 +out:
80879 + read_unlock(&tasklist_lock);
80880 + return ret;
80881 +#endif
80882 + return 0;
80883 +}
80884 +
80885 +int
80886 +gr_pid_is_chrooted(struct task_struct *p)
80887 +{
80888 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
80889 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
80890 + return 0;
80891 +
80892 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
80893 + !have_same_root(current, p)) {
80894 + return 1;
80895 + }
80896 +#endif
80897 + return 0;
80898 +}
80899 +
80900 +EXPORT_SYMBOL(gr_pid_is_chrooted);
80901 +
80902 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
80903 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
80904 +{
80905 + struct dentry *dentry = (struct dentry *)u_dentry;
80906 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
80907 + struct dentry *realroot;
80908 + struct vfsmount *realrootmnt;
80909 + struct dentry *currentroot;
80910 + struct vfsmount *currentmnt;
80911 + struct task_struct *reaper = &init_task;
80912 + int ret = 1;
80913 +
80914 + read_lock(&reaper->fs->lock);
80915 + realrootmnt = mntget(reaper->fs->root.mnt);
80916 + realroot = dget(reaper->fs->root.dentry);
80917 + read_unlock(&reaper->fs->lock);
80918 +
80919 + read_lock(&current->fs->lock);
80920 + currentmnt = mntget(current->fs->root.mnt);
80921 + currentroot = dget(current->fs->root.dentry);
80922 + read_unlock(&current->fs->lock);
80923 +
80924 + spin_lock(&dcache_lock);
80925 + for (;;) {
80926 + if (unlikely((dentry == realroot && mnt == realrootmnt)
80927 + || (dentry == currentroot && mnt == currentmnt)))
80928 + break;
80929 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
80930 + if (mnt->mnt_parent == mnt)
80931 + break;
80932 + dentry = mnt->mnt_mountpoint;
80933 + mnt = mnt->mnt_parent;
80934 + continue;
80935 + }
80936 + dentry = dentry->d_parent;
80937 + }
80938 + spin_unlock(&dcache_lock);
80939 +
80940 + dput(currentroot);
80941 + mntput(currentmnt);
80942 +
80943 + /* access is outside of chroot */
80944 + if (dentry == realroot && mnt == realrootmnt)
80945 + ret = 0;
80946 +
80947 + dput(realroot);
80948 + mntput(realrootmnt);
80949 + return ret;
80950 +}
80951 +#endif
80952 +
80953 +int
80954 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
80955 +{
80956 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
80957 + if (!grsec_enable_chroot_fchdir)
80958 + return 1;
80959 +
80960 + if (!proc_is_chrooted(current))
80961 + return 1;
80962 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
80963 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
80964 + return 0;
80965 + }
80966 +#endif
80967 + return 1;
80968 +}
80969 +
80970 +int
80971 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
80972 + const time_t shm_createtime)
80973 +{
80974 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
80975 + struct task_struct *p;
80976 + time_t starttime;
80977 +
80978 + if (unlikely(!grsec_enable_chroot_shmat))
80979 + return 1;
80980 +
80981 + if (likely(!proc_is_chrooted(current)))
80982 + return 1;
80983 +
80984 + rcu_read_lock();
80985 + read_lock(&tasklist_lock);
80986 +
80987 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
80988 + starttime = p->start_time.tv_sec;
80989 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
80990 + if (have_same_root(current, p)) {
80991 + goto allow;
80992 + } else {
80993 + read_unlock(&tasklist_lock);
80994 + rcu_read_unlock();
80995 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
80996 + return 0;
80997 + }
80998 + }
80999 + /* creator exited, pid reuse, fall through to next check */
81000 + }
81001 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
81002 + if (unlikely(!have_same_root(current, p))) {
81003 + read_unlock(&tasklist_lock);
81004 + rcu_read_unlock();
81005 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
81006 + return 0;
81007 + }
81008 + }
81009 +
81010 +allow:
81011 + read_unlock(&tasklist_lock);
81012 + rcu_read_unlock();
81013 +#endif
81014 + return 1;
81015 +}
81016 +
81017 +void
81018 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
81019 +{
81020 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
81021 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
81022 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
81023 +#endif
81024 + return;
81025 +}
81026 +
81027 +int
81028 +gr_handle_chroot_mknod(const struct dentry *dentry,
81029 + const struct vfsmount *mnt, const int mode)
81030 +{
81031 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
81032 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
81033 + proc_is_chrooted(current)) {
81034 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
81035 + return -EPERM;
81036 + }
81037 +#endif
81038 + return 0;
81039 +}
81040 +
81041 +int
81042 +gr_handle_chroot_mount(const struct dentry *dentry,
81043 + const struct vfsmount *mnt, const char *dev_name)
81044 +{
81045 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
81046 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
81047 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
81048 + return -EPERM;
81049 + }
81050 +#endif
81051 + return 0;
81052 +}
81053 +
81054 +int
81055 +gr_handle_chroot_pivot(void)
81056 +{
81057 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
81058 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
81059 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
81060 + return -EPERM;
81061 + }
81062 +#endif
81063 + return 0;
81064 +}
81065 +
81066 +int
81067 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
81068 +{
81069 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
81070 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
81071 + !gr_is_outside_chroot(dentry, mnt)) {
81072 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
81073 + return -EPERM;
81074 + }
81075 +#endif
81076 + return 0;
81077 +}
81078 +
81079 +extern const char *captab_log[];
81080 +extern int captab_log_entries;
81081 +
81082 +int
81083 +gr_chroot_is_capable(const int cap)
81084 +{
81085 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
81086 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
81087 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
81088 + if (cap_raised(chroot_caps, cap)) {
81089 + const struct cred *creds = current_cred();
81090 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
81091 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
81092 + }
81093 + return 0;
81094 + }
81095 + }
81096 +#endif
81097 + return 1;
81098 +}
81099 +
81100 +int
81101 +gr_chroot_is_capable_nolog(const int cap)
81102 +{
81103 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
81104 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
81105 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
81106 + if (cap_raised(chroot_caps, cap)) {
81107 + return 0;
81108 + }
81109 + }
81110 +#endif
81111 + return 1;
81112 +}
81113 +
81114 +int
81115 +gr_handle_chroot_sysctl(const int op)
81116 +{
81117 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
81118 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
81119 + && (op & MAY_WRITE))
81120 + return -EACCES;
81121 +#endif
81122 + return 0;
81123 +}
81124 +
81125 +void
81126 +gr_handle_chroot_chdir(struct path *path)
81127 +{
81128 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
81129 + if (grsec_enable_chroot_chdir)
81130 + set_fs_pwd(current->fs, path);
81131 +#endif
81132 + return;
81133 +}
81134 +
81135 +int
81136 +gr_handle_chroot_chmod(const struct dentry *dentry,
81137 + const struct vfsmount *mnt, const int mode)
81138 +{
81139 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
81140 + /* allow chmod +s on directories, but not on files */
81141 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
81142 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
81143 + proc_is_chrooted(current)) {
81144 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
81145 + return -EPERM;
81146 + }
81147 +#endif
81148 + return 0;
81149 +}
81150 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
81151 new file mode 100644
81152 index 0000000..40545bf
81153 --- /dev/null
81154 +++ b/grsecurity/grsec_disabled.c
81155 @@ -0,0 +1,437 @@
81156 +#include <linux/kernel.h>
81157 +#include <linux/module.h>
81158 +#include <linux/sched.h>
81159 +#include <linux/file.h>
81160 +#include <linux/fs.h>
81161 +#include <linux/kdev_t.h>
81162 +#include <linux/net.h>
81163 +#include <linux/in.h>
81164 +#include <linux/ip.h>
81165 +#include <linux/skbuff.h>
81166 +#include <linux/sysctl.h>
81167 +
81168 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
81169 +void
81170 +pax_set_initial_flags(struct linux_binprm *bprm)
81171 +{
81172 + return;
81173 +}
81174 +#endif
81175 +
81176 +#ifdef CONFIG_SYSCTL
81177 +__u32
81178 +gr_handle_sysctl(const struct ctl_table * table, const int op)
81179 +{
81180 + return 0;
81181 +}
81182 +#endif
81183 +
81184 +#ifdef CONFIG_TASKSTATS
81185 +int gr_is_taskstats_denied(int pid)
81186 +{
81187 + return 0;
81188 +}
81189 +#endif
81190 +
81191 +int
81192 +gr_acl_is_enabled(void)
81193 +{
81194 + return 0;
81195 +}
81196 +
81197 +void
81198 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
81199 +{
81200 + return;
81201 +}
81202 +
81203 +int
81204 +gr_handle_rawio(const struct inode *inode)
81205 +{
81206 + return 0;
81207 +}
81208 +
81209 +void
81210 +gr_acl_handle_psacct(struct task_struct *task, const long code)
81211 +{
81212 + return;
81213 +}
81214 +
81215 +int
81216 +gr_handle_ptrace(struct task_struct *task, const long request)
81217 +{
81218 + return 0;
81219 +}
81220 +
81221 +int
81222 +gr_handle_proc_ptrace(struct task_struct *task)
81223 +{
81224 + return 0;
81225 +}
81226 +
81227 +void
81228 +gr_learn_resource(const struct task_struct *task,
81229 + const int res, const unsigned long wanted, const int gt)
81230 +{
81231 + return;
81232 +}
81233 +
81234 +int
81235 +gr_set_acls(const int type)
81236 +{
81237 + return 0;
81238 +}
81239 +
81240 +int
81241 +gr_check_hidden_task(const struct task_struct *tsk)
81242 +{
81243 + return 0;
81244 +}
81245 +
81246 +int
81247 +gr_check_protected_task(const struct task_struct *task)
81248 +{
81249 + return 0;
81250 +}
81251 +
81252 +int
81253 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
81254 +{
81255 + return 0;
81256 +}
81257 +
81258 +void
81259 +gr_copy_label(struct task_struct *tsk)
81260 +{
81261 + return;
81262 +}
81263 +
81264 +void
81265 +gr_set_pax_flags(struct task_struct *task)
81266 +{
81267 + return;
81268 +}
81269 +
81270 +int
81271 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
81272 + const int unsafe_share)
81273 +{
81274 + return 0;
81275 +}
81276 +
81277 +void
81278 +gr_handle_delete(const ino_t ino, const dev_t dev)
81279 +{
81280 + return;
81281 +}
81282 +
81283 +void
81284 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
81285 +{
81286 + return;
81287 +}
81288 +
81289 +void
81290 +gr_handle_crash(struct task_struct *task, const int sig)
81291 +{
81292 + return;
81293 +}
81294 +
81295 +int
81296 +gr_check_crash_exec(const struct file *filp)
81297 +{
81298 + return 0;
81299 +}
81300 +
81301 +int
81302 +gr_check_crash_uid(const uid_t uid)
81303 +{
81304 + return 0;
81305 +}
81306 +
81307 +void
81308 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
81309 + struct dentry *old_dentry,
81310 + struct dentry *new_dentry,
81311 + struct vfsmount *mnt, const __u8 replace)
81312 +{
81313 + return;
81314 +}
81315 +
81316 +int
81317 +gr_search_socket(const int family, const int type, const int protocol)
81318 +{
81319 + return 1;
81320 +}
81321 +
81322 +int
81323 +gr_search_connectbind(const int mode, const struct socket *sock,
81324 + const struct sockaddr_in *addr)
81325 +{
81326 + return 0;
81327 +}
81328 +
81329 +void
81330 +gr_handle_alertkill(struct task_struct *task)
81331 +{
81332 + return;
81333 +}
81334 +
81335 +__u32
81336 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
81337 +{
81338 + return 1;
81339 +}
81340 +
81341 +__u32
81342 +gr_acl_handle_hidden_file(const struct dentry * dentry,
81343 + const struct vfsmount * mnt)
81344 +{
81345 + return 1;
81346 +}
81347 +
81348 +__u32
81349 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
81350 + int acc_mode)
81351 +{
81352 + return 1;
81353 +}
81354 +
81355 +__u32
81356 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
81357 +{
81358 + return 1;
81359 +}
81360 +
81361 +__u32
81362 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
81363 +{
81364 + return 1;
81365 +}
81366 +
81367 +int
81368 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
81369 + unsigned int *vm_flags)
81370 +{
81371 + return 1;
81372 +}
81373 +
81374 +__u32
81375 +gr_acl_handle_truncate(const struct dentry * dentry,
81376 + const struct vfsmount * mnt)
81377 +{
81378 + return 1;
81379 +}
81380 +
81381 +__u32
81382 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
81383 +{
81384 + return 1;
81385 +}
81386 +
81387 +__u32
81388 +gr_acl_handle_access(const struct dentry * dentry,
81389 + const struct vfsmount * mnt, const int fmode)
81390 +{
81391 + return 1;
81392 +}
81393 +
81394 +__u32
81395 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
81396 + umode_t *mode)
81397 +{
81398 + return 1;
81399 +}
81400 +
81401 +__u32
81402 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
81403 +{
81404 + return 1;
81405 +}
81406 +
81407 +__u32
81408 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
81409 +{
81410 + return 1;
81411 +}
81412 +
81413 +void
81414 +grsecurity_init(void)
81415 +{
81416 + return;
81417 +}
81418 +
81419 +umode_t gr_acl_umask(void)
81420 +{
81421 + return 0;
81422 +}
81423 +
81424 +__u32
81425 +gr_acl_handle_mknod(const struct dentry * new_dentry,
81426 + const struct dentry * parent_dentry,
81427 + const struct vfsmount * parent_mnt,
81428 + const int mode)
81429 +{
81430 + return 1;
81431 +}
81432 +
81433 +__u32
81434 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
81435 + const struct dentry * parent_dentry,
81436 + const struct vfsmount * parent_mnt)
81437 +{
81438 + return 1;
81439 +}
81440 +
81441 +__u32
81442 +gr_acl_handle_symlink(const struct dentry * new_dentry,
81443 + const struct dentry * parent_dentry,
81444 + const struct vfsmount * parent_mnt, const char *from)
81445 +{
81446 + return 1;
81447 +}
81448 +
81449 +__u32
81450 +gr_acl_handle_link(const struct dentry * new_dentry,
81451 + const struct dentry * parent_dentry,
81452 + const struct vfsmount * parent_mnt,
81453 + const struct dentry * old_dentry,
81454 + const struct vfsmount * old_mnt, const char *to)
81455 +{
81456 + return 1;
81457 +}
81458 +
81459 +int
81460 +gr_acl_handle_rename(const struct dentry *new_dentry,
81461 + const struct dentry *parent_dentry,
81462 + const struct vfsmount *parent_mnt,
81463 + const struct dentry *old_dentry,
81464 + const struct inode *old_parent_inode,
81465 + const struct vfsmount *old_mnt, const char *newname)
81466 +{
81467 + return 0;
81468 +}
81469 +
81470 +int
81471 +gr_acl_handle_filldir(const struct file *file, const char *name,
81472 + const int namelen, const ino_t ino)
81473 +{
81474 + return 1;
81475 +}
81476 +
81477 +int
81478 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
81479 + const time_t shm_createtime, const uid_t cuid, const int shmid)
81480 +{
81481 + return 1;
81482 +}
81483 +
81484 +int
81485 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
81486 +{
81487 + return 0;
81488 +}
81489 +
81490 +int
81491 +gr_search_accept(const struct socket *sock)
81492 +{
81493 + return 0;
81494 +}
81495 +
81496 +int
81497 +gr_search_listen(const struct socket *sock)
81498 +{
81499 + return 0;
81500 +}
81501 +
81502 +int
81503 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
81504 +{
81505 + return 0;
81506 +}
81507 +
81508 +__u32
81509 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
81510 +{
81511 + return 1;
81512 +}
81513 +
81514 +__u32
81515 +gr_acl_handle_creat(const struct dentry * dentry,
81516 + const struct dentry * p_dentry,
81517 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
81518 + const int imode)
81519 +{
81520 + return 1;
81521 +}
81522 +
81523 +void
81524 +gr_acl_handle_exit(void)
81525 +{
81526 + return;
81527 +}
81528 +
81529 +int
81530 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
81531 +{
81532 + return 1;
81533 +}
81534 +
81535 +void
81536 +gr_set_role_label(const uid_t uid, const gid_t gid)
81537 +{
81538 + return;
81539 +}
81540 +
81541 +int
81542 +gr_acl_handle_procpidmem(const struct task_struct *task)
81543 +{
81544 + return 0;
81545 +}
81546 +
81547 +int
81548 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
81549 +{
81550 + return 0;
81551 +}
81552 +
81553 +int
81554 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
81555 +{
81556 + return 0;
81557 +}
81558 +
81559 +void
81560 +gr_set_kernel_label(struct task_struct *task)
81561 +{
81562 + return;
81563 +}
81564 +
81565 +int
81566 +gr_check_user_change(int real, int effective, int fs)
81567 +{
81568 + return 0;
81569 +}
81570 +
81571 +int
81572 +gr_check_group_change(int real, int effective, int fs)
81573 +{
81574 + return 0;
81575 +}
81576 +
81577 +int gr_acl_enable_at_secure(void)
81578 +{
81579 + return 0;
81580 +}
81581 +
81582 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
81583 +{
81584 + return dentry->d_inode->i_sb->s_dev;
81585 +}
81586 +
81587 +EXPORT_SYMBOL(gr_learn_resource);
81588 +EXPORT_SYMBOL(gr_set_kernel_label);
81589 +#ifdef CONFIG_SECURITY
81590 +EXPORT_SYMBOL(gr_check_user_change);
81591 +EXPORT_SYMBOL(gr_check_group_change);
81592 +#endif
81593 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
81594 new file mode 100644
81595 index 0000000..a96e155
81596 --- /dev/null
81597 +++ b/grsecurity/grsec_exec.c
81598 @@ -0,0 +1,204 @@
81599 +#include <linux/kernel.h>
81600 +#include <linux/sched.h>
81601 +#include <linux/file.h>
81602 +#include <linux/binfmts.h>
81603 +#include <linux/smp_lock.h>
81604 +#include <linux/fs.h>
81605 +#include <linux/types.h>
81606 +#include <linux/grdefs.h>
81607 +#include <linux/grinternal.h>
81608 +#include <linux/capability.h>
81609 +#include <linux/compat.h>
81610 +#include <linux/module.h>
81611 +
81612 +#include <asm/uaccess.h>
81613 +
81614 +#ifdef CONFIG_GRKERNSEC_EXECLOG
81615 +static char gr_exec_arg_buf[132];
81616 +static DEFINE_MUTEX(gr_exec_arg_mutex);
81617 +#endif
81618 +
81619 +void
81620 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
81621 +{
81622 +#ifdef CONFIG_GRKERNSEC_EXECLOG
81623 + char *grarg = gr_exec_arg_buf;
81624 + unsigned int i, x, execlen = 0;
81625 + char c;
81626 +
81627 + if (!((grsec_enable_execlog && grsec_enable_group &&
81628 + in_group_p(grsec_audit_gid))
81629 + || (grsec_enable_execlog && !grsec_enable_group)))
81630 + return;
81631 +
81632 + mutex_lock(&gr_exec_arg_mutex);
81633 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
81634 +
81635 + if (unlikely(argv == NULL))
81636 + goto log;
81637 +
81638 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
81639 + const char __user *p;
81640 + unsigned int len;
81641 +
81642 + if (copy_from_user(&p, argv + i, sizeof(p)))
81643 + goto log;
81644 + if (!p)
81645 + goto log;
81646 + len = strnlen_user(p, 128 - execlen);
81647 + if (len > 128 - execlen)
81648 + len = 128 - execlen;
81649 + else if (len > 0)
81650 + len--;
81651 + if (copy_from_user(grarg + execlen, p, len))
81652 + goto log;
81653 +
81654 + /* rewrite unprintable characters */
81655 + for (x = 0; x < len; x++) {
81656 + c = *(grarg + execlen + x);
81657 + if (c < 32 || c > 126)
81658 + *(grarg + execlen + x) = ' ';
81659 + }
81660 +
81661 + execlen += len;
81662 + *(grarg + execlen) = ' ';
81663 + *(grarg + execlen + 1) = '\0';
81664 + execlen++;
81665 + }
81666 +
81667 + log:
81668 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
81669 + bprm->file->f_path.mnt, grarg);
81670 + mutex_unlock(&gr_exec_arg_mutex);
81671 +#endif
81672 + return;
81673 +}
81674 +
81675 +#ifdef CONFIG_COMPAT
81676 +void
81677 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
81678 +{
81679 +#ifdef CONFIG_GRKERNSEC_EXECLOG
81680 + char *grarg = gr_exec_arg_buf;
81681 + unsigned int i, x, execlen = 0;
81682 + char c;
81683 +
81684 + if (!((grsec_enable_execlog && grsec_enable_group &&
81685 + in_group_p(grsec_audit_gid))
81686 + || (grsec_enable_execlog && !grsec_enable_group)))
81687 + return;
81688 +
81689 + mutex_lock(&gr_exec_arg_mutex);
81690 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
81691 +
81692 + if (unlikely(argv == NULL))
81693 + goto log;
81694 +
81695 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
81696 + compat_uptr_t p;
81697 + unsigned int len;
81698 +
81699 + if (get_user(p, argv + i))
81700 + goto log;
81701 + len = strnlen_user(compat_ptr(p), 128 - execlen);
81702 + if (len > 128 - execlen)
81703 + len = 128 - execlen;
81704 + else if (len > 0)
81705 + len--;
81706 + else
81707 + goto log;
81708 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
81709 + goto log;
81710 +
81711 + /* rewrite unprintable characters */
81712 + for (x = 0; x < len; x++) {
81713 + c = *(grarg + execlen + x);
81714 + if (c < 32 || c > 126)
81715 + *(grarg + execlen + x) = ' ';
81716 + }
81717 +
81718 + execlen += len;
81719 + *(grarg + execlen) = ' ';
81720 + *(grarg + execlen + 1) = '\0';
81721 + execlen++;
81722 + }
81723 +
81724 + log:
81725 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
81726 + bprm->file->f_path.mnt, grarg);
81727 + mutex_unlock(&gr_exec_arg_mutex);
81728 +#endif
81729 + return;
81730 +}
81731 +#endif
81732 +
81733 +#ifdef CONFIG_GRKERNSEC
81734 +extern int gr_acl_is_capable(const int cap);
81735 +extern int gr_acl_is_capable_nolog(const int cap);
81736 +extern int gr_chroot_is_capable(const int cap);
81737 +extern int gr_chroot_is_capable_nolog(const int cap);
81738 +#endif
81739 +
81740 +const char *captab_log[] = {
81741 + "CAP_CHOWN",
81742 + "CAP_DAC_OVERRIDE",
81743 + "CAP_DAC_READ_SEARCH",
81744 + "CAP_FOWNER",
81745 + "CAP_FSETID",
81746 + "CAP_KILL",
81747 + "CAP_SETGID",
81748 + "CAP_SETUID",
81749 + "CAP_SETPCAP",
81750 + "CAP_LINUX_IMMUTABLE",
81751 + "CAP_NET_BIND_SERVICE",
81752 + "CAP_NET_BROADCAST",
81753 + "CAP_NET_ADMIN",
81754 + "CAP_NET_RAW",
81755 + "CAP_IPC_LOCK",
81756 + "CAP_IPC_OWNER",
81757 + "CAP_SYS_MODULE",
81758 + "CAP_SYS_RAWIO",
81759 + "CAP_SYS_CHROOT",
81760 + "CAP_SYS_PTRACE",
81761 + "CAP_SYS_PACCT",
81762 + "CAP_SYS_ADMIN",
81763 + "CAP_SYS_BOOT",
81764 + "CAP_SYS_NICE",
81765 + "CAP_SYS_RESOURCE",
81766 + "CAP_SYS_TIME",
81767 + "CAP_SYS_TTY_CONFIG",
81768 + "CAP_MKNOD",
81769 + "CAP_LEASE",
81770 + "CAP_AUDIT_WRITE",
81771 + "CAP_AUDIT_CONTROL",
81772 + "CAP_SETFCAP",
81773 + "CAP_MAC_OVERRIDE",
81774 + "CAP_MAC_ADMIN"
81775 +};
81776 +
81777 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
81778 +
81779 +int gr_is_capable(const int cap)
81780 +{
81781 +#ifdef CONFIG_GRKERNSEC
81782 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
81783 + return 1;
81784 + return 0;
81785 +#else
81786 + return 1;
81787 +#endif
81788 +}
81789 +
81790 +int gr_is_capable_nolog(const int cap)
81791 +{
81792 +#ifdef CONFIG_GRKERNSEC
81793 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
81794 + return 1;
81795 + return 0;
81796 +#else
81797 + return 1;
81798 +#endif
81799 +}
81800 +
81801 +EXPORT_SYMBOL(gr_is_capable);
81802 +EXPORT_SYMBOL(gr_is_capable_nolog);
81803 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
81804 new file mode 100644
81805 index 0000000..d3ee748
81806 --- /dev/null
81807 +++ b/grsecurity/grsec_fifo.c
81808 @@ -0,0 +1,24 @@
81809 +#include <linux/kernel.h>
81810 +#include <linux/sched.h>
81811 +#include <linux/fs.h>
81812 +#include <linux/file.h>
81813 +#include <linux/grinternal.h>
81814 +
81815 +int
81816 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
81817 + const struct dentry *dir, const int flag, const int acc_mode)
81818 +{
81819 +#ifdef CONFIG_GRKERNSEC_FIFO
81820 + const struct cred *cred = current_cred();
81821 +
81822 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
81823 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
81824 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
81825 + (cred->fsuid != dentry->d_inode->i_uid)) {
81826 + if (!inode_permission(dentry->d_inode, acc_mode))
81827 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
81828 + return -EACCES;
81829 + }
81830 +#endif
81831 + return 0;
81832 +}
81833 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
81834 new file mode 100644
81835 index 0000000..8ca18bf
81836 --- /dev/null
81837 +++ b/grsecurity/grsec_fork.c
81838 @@ -0,0 +1,23 @@
81839 +#include <linux/kernel.h>
81840 +#include <linux/sched.h>
81841 +#include <linux/grsecurity.h>
81842 +#include <linux/grinternal.h>
81843 +#include <linux/errno.h>
81844 +
81845 +void
81846 +gr_log_forkfail(const int retval)
81847 +{
81848 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
81849 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
81850 + switch (retval) {
81851 + case -EAGAIN:
81852 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
81853 + break;
81854 + case -ENOMEM:
81855 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
81856 + break;
81857 + }
81858 + }
81859 +#endif
81860 + return;
81861 +}
81862 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
81863 new file mode 100644
81864 index 0000000..1e995d3
81865 --- /dev/null
81866 +++ b/grsecurity/grsec_init.c
81867 @@ -0,0 +1,278 @@
81868 +#include <linux/kernel.h>
81869 +#include <linux/sched.h>
81870 +#include <linux/mm.h>
81871 +#include <linux/smp_lock.h>
81872 +#include <linux/gracl.h>
81873 +#include <linux/slab.h>
81874 +#include <linux/vmalloc.h>
81875 +#include <linux/percpu.h>
81876 +#include <linux/module.h>
81877 +
81878 +int grsec_enable_ptrace_readexec;
81879 +int grsec_enable_setxid;
81880 +int grsec_enable_brute;
81881 +int grsec_enable_link;
81882 +int grsec_enable_dmesg;
81883 +int grsec_enable_harden_ptrace;
81884 +int grsec_enable_fifo;
81885 +int grsec_enable_execlog;
81886 +int grsec_enable_signal;
81887 +int grsec_enable_forkfail;
81888 +int grsec_enable_audit_ptrace;
81889 +int grsec_enable_time;
81890 +int grsec_enable_audit_textrel;
81891 +int grsec_enable_group;
81892 +int grsec_audit_gid;
81893 +int grsec_enable_chdir;
81894 +int grsec_enable_mount;
81895 +int grsec_enable_rofs;
81896 +int grsec_enable_chroot_findtask;
81897 +int grsec_enable_chroot_mount;
81898 +int grsec_enable_chroot_shmat;
81899 +int grsec_enable_chroot_fchdir;
81900 +int grsec_enable_chroot_double;
81901 +int grsec_enable_chroot_pivot;
81902 +int grsec_enable_chroot_chdir;
81903 +int grsec_enable_chroot_chmod;
81904 +int grsec_enable_chroot_mknod;
81905 +int grsec_enable_chroot_nice;
81906 +int grsec_enable_chroot_execlog;
81907 +int grsec_enable_chroot_caps;
81908 +int grsec_enable_chroot_sysctl;
81909 +int grsec_enable_chroot_unix;
81910 +int grsec_enable_tpe;
81911 +int grsec_tpe_gid;
81912 +int grsec_enable_blackhole;
81913 +#ifdef CONFIG_IPV6_MODULE
81914 +EXPORT_SYMBOL(grsec_enable_blackhole);
81915 +#endif
81916 +int grsec_lastack_retries;
81917 +int grsec_enable_tpe_all;
81918 +int grsec_enable_tpe_invert;
81919 +int grsec_enable_socket_all;
81920 +int grsec_socket_all_gid;
81921 +int grsec_enable_socket_client;
81922 +int grsec_socket_client_gid;
81923 +int grsec_enable_socket_server;
81924 +int grsec_socket_server_gid;
81925 +int grsec_resource_logging;
81926 +int grsec_disable_privio;
81927 +int grsec_enable_log_rwxmaps;
81928 +int grsec_lock;
81929 +
81930 +DEFINE_SPINLOCK(grsec_alert_lock);
81931 +unsigned long grsec_alert_wtime = 0;
81932 +unsigned long grsec_alert_fyet = 0;
81933 +
81934 +DEFINE_SPINLOCK(grsec_audit_lock);
81935 +
81936 +DEFINE_RWLOCK(grsec_exec_file_lock);
81937 +
81938 +char *gr_shared_page[4];
81939 +
81940 +char *gr_alert_log_fmt;
81941 +char *gr_audit_log_fmt;
81942 +char *gr_alert_log_buf;
81943 +char *gr_audit_log_buf;
81944 +
81945 +extern struct gr_arg *gr_usermode;
81946 +extern unsigned char *gr_system_salt;
81947 +extern unsigned char *gr_system_sum;
81948 +
81949 +void __init
81950 +grsecurity_init(void)
81951 +{
81952 + int j;
81953 + /* create the per-cpu shared pages */
81954 +
81955 +#ifdef CONFIG_X86
81956 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
81957 +#endif
81958 +
81959 + for (j = 0; j < 4; j++) {
81960 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
81961 + if (gr_shared_page[j] == NULL) {
81962 + panic("Unable to allocate grsecurity shared page");
81963 + return;
81964 + }
81965 + }
81966 +
81967 + /* allocate log buffers */
81968 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
81969 + if (!gr_alert_log_fmt) {
81970 + panic("Unable to allocate grsecurity alert log format buffer");
81971 + return;
81972 + }
81973 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
81974 + if (!gr_audit_log_fmt) {
81975 + panic("Unable to allocate grsecurity audit log format buffer");
81976 + return;
81977 + }
81978 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
81979 + if (!gr_alert_log_buf) {
81980 + panic("Unable to allocate grsecurity alert log buffer");
81981 + return;
81982 + }
81983 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
81984 + if (!gr_audit_log_buf) {
81985 + panic("Unable to allocate grsecurity audit log buffer");
81986 + return;
81987 + }
81988 +
81989 + /* allocate memory for authentication structure */
81990 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
81991 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
81992 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
81993 +
81994 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
81995 + panic("Unable to allocate grsecurity authentication structure");
81996 + return;
81997 + }
81998 +
81999 +
82000 +#ifdef CONFIG_GRKERNSEC_IO
82001 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
82002 + grsec_disable_privio = 1;
82003 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
82004 + grsec_disable_privio = 1;
82005 +#else
82006 + grsec_disable_privio = 0;
82007 +#endif
82008 +#endif
82009 +
82010 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
82011 + /* for backward compatibility, tpe_invert always defaults to on if
82012 + enabled in the kernel
82013 + */
82014 + grsec_enable_tpe_invert = 1;
82015 +#endif
82016 +
82017 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
82018 +#ifndef CONFIG_GRKERNSEC_SYSCTL
82019 + grsec_lock = 1;
82020 +#endif
82021 +
82022 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
82023 + grsec_enable_audit_textrel = 1;
82024 +#endif
82025 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
82026 + grsec_enable_log_rwxmaps = 1;
82027 +#endif
82028 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
82029 + grsec_enable_group = 1;
82030 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
82031 +#endif
82032 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
82033 + grsec_enable_chdir = 1;
82034 +#endif
82035 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
82036 + grsec_enable_harden_ptrace = 1;
82037 +#endif
82038 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82039 + grsec_enable_mount = 1;
82040 +#endif
82041 +#ifdef CONFIG_GRKERNSEC_LINK
82042 + grsec_enable_link = 1;
82043 +#endif
82044 +#ifdef CONFIG_GRKERNSEC_BRUTE
82045 + grsec_enable_brute = 1;
82046 +#endif
82047 +#ifdef CONFIG_GRKERNSEC_DMESG
82048 + grsec_enable_dmesg = 1;
82049 +#endif
82050 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82051 + grsec_enable_blackhole = 1;
82052 + grsec_lastack_retries = 4;
82053 +#endif
82054 +#ifdef CONFIG_GRKERNSEC_FIFO
82055 + grsec_enable_fifo = 1;
82056 +#endif
82057 +#ifdef CONFIG_GRKERNSEC_EXECLOG
82058 + grsec_enable_execlog = 1;
82059 +#endif
82060 +#ifdef CONFIG_GRKERNSEC_SETXID
82061 + grsec_enable_setxid = 1;
82062 +#endif
82063 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
82064 + grsec_enable_ptrace_readexec = 1;
82065 +#endif
82066 +#ifdef CONFIG_GRKERNSEC_SIGNAL
82067 + grsec_enable_signal = 1;
82068 +#endif
82069 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
82070 + grsec_enable_forkfail = 1;
82071 +#endif
82072 +#ifdef CONFIG_GRKERNSEC_TIME
82073 + grsec_enable_time = 1;
82074 +#endif
82075 +#ifdef CONFIG_GRKERNSEC_RESLOG
82076 + grsec_resource_logging = 1;
82077 +#endif
82078 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
82079 + grsec_enable_chroot_findtask = 1;
82080 +#endif
82081 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
82082 + grsec_enable_chroot_unix = 1;
82083 +#endif
82084 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
82085 + grsec_enable_chroot_mount = 1;
82086 +#endif
82087 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
82088 + grsec_enable_chroot_fchdir = 1;
82089 +#endif
82090 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
82091 + grsec_enable_chroot_shmat = 1;
82092 +#endif
82093 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
82094 + grsec_enable_audit_ptrace = 1;
82095 +#endif
82096 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
82097 + grsec_enable_chroot_double = 1;
82098 +#endif
82099 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
82100 + grsec_enable_chroot_pivot = 1;
82101 +#endif
82102 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
82103 + grsec_enable_chroot_chdir = 1;
82104 +#endif
82105 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
82106 + grsec_enable_chroot_chmod = 1;
82107 +#endif
82108 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
82109 + grsec_enable_chroot_mknod = 1;
82110 +#endif
82111 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
82112 + grsec_enable_chroot_nice = 1;
82113 +#endif
82114 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
82115 + grsec_enable_chroot_execlog = 1;
82116 +#endif
82117 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
82118 + grsec_enable_chroot_caps = 1;
82119 +#endif
82120 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
82121 + grsec_enable_chroot_sysctl = 1;
82122 +#endif
82123 +#ifdef CONFIG_GRKERNSEC_TPE
82124 + grsec_enable_tpe = 1;
82125 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
82126 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
82127 + grsec_enable_tpe_all = 1;
82128 +#endif
82129 +#endif
82130 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
82131 + grsec_enable_socket_all = 1;
82132 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
82133 +#endif
82134 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
82135 + grsec_enable_socket_client = 1;
82136 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
82137 +#endif
82138 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
82139 + grsec_enable_socket_server = 1;
82140 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
82141 +#endif
82142 +#endif
82143 +
82144 + return;
82145 +}
82146 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
82147 new file mode 100644
82148 index 0000000..3efe141
82149 --- /dev/null
82150 +++ b/grsecurity/grsec_link.c
82151 @@ -0,0 +1,43 @@
82152 +#include <linux/kernel.h>
82153 +#include <linux/sched.h>
82154 +#include <linux/fs.h>
82155 +#include <linux/file.h>
82156 +#include <linux/grinternal.h>
82157 +
82158 +int
82159 +gr_handle_follow_link(const struct inode *parent,
82160 + const struct inode *inode,
82161 + const struct dentry *dentry, const struct vfsmount *mnt)
82162 +{
82163 +#ifdef CONFIG_GRKERNSEC_LINK
82164 + const struct cred *cred = current_cred();
82165 +
82166 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
82167 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
82168 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
82169 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
82170 + return -EACCES;
82171 + }
82172 +#endif
82173 + return 0;
82174 +}
82175 +
82176 +int
82177 +gr_handle_hardlink(const struct dentry *dentry,
82178 + const struct vfsmount *mnt,
82179 + struct inode *inode, const int mode, const char *to)
82180 +{
82181 +#ifdef CONFIG_GRKERNSEC_LINK
82182 + const struct cred *cred = current_cred();
82183 +
82184 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
82185 + (!S_ISREG(mode) || (mode & S_ISUID) ||
82186 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
82187 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
82188 + !capable(CAP_FOWNER) && cred->uid) {
82189 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
82190 + return -EPERM;
82191 + }
82192 +#endif
82193 + return 0;
82194 +}
82195 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
82196 new file mode 100644
82197 index 0000000..a45d2e9
82198 --- /dev/null
82199 +++ b/grsecurity/grsec_log.c
82200 @@ -0,0 +1,322 @@
82201 +#include <linux/kernel.h>
82202 +#include <linux/sched.h>
82203 +#include <linux/file.h>
82204 +#include <linux/tty.h>
82205 +#include <linux/fs.h>
82206 +#include <linux/grinternal.h>
82207 +
82208 +#ifdef CONFIG_TREE_PREEMPT_RCU
82209 +#define DISABLE_PREEMPT() preempt_disable()
82210 +#define ENABLE_PREEMPT() preempt_enable()
82211 +#else
82212 +#define DISABLE_PREEMPT()
82213 +#define ENABLE_PREEMPT()
82214 +#endif
82215 +
82216 +#define BEGIN_LOCKS(x) \
82217 + DISABLE_PREEMPT(); \
82218 + rcu_read_lock(); \
82219 + read_lock(&tasklist_lock); \
82220 + read_lock(&grsec_exec_file_lock); \
82221 + if (x != GR_DO_AUDIT) \
82222 + spin_lock(&grsec_alert_lock); \
82223 + else \
82224 + spin_lock(&grsec_audit_lock)
82225 +
82226 +#define END_LOCKS(x) \
82227 + if (x != GR_DO_AUDIT) \
82228 + spin_unlock(&grsec_alert_lock); \
82229 + else \
82230 + spin_unlock(&grsec_audit_lock); \
82231 + read_unlock(&grsec_exec_file_lock); \
82232 + read_unlock(&tasklist_lock); \
82233 + rcu_read_unlock(); \
82234 + ENABLE_PREEMPT(); \
82235 + if (x == GR_DONT_AUDIT) \
82236 + gr_handle_alertkill(current)
82237 +
82238 +enum {
82239 + FLOODING,
82240 + NO_FLOODING
82241 +};
82242 +
82243 +extern char *gr_alert_log_fmt;
82244 +extern char *gr_audit_log_fmt;
82245 +extern char *gr_alert_log_buf;
82246 +extern char *gr_audit_log_buf;
82247 +
82248 +static int gr_log_start(int audit)
82249 +{
82250 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
82251 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
82252 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
82253 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
82254 + unsigned long curr_secs = get_seconds();
82255 +
82256 + if (audit == GR_DO_AUDIT)
82257 + goto set_fmt;
82258 +
82259 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
82260 + grsec_alert_wtime = curr_secs;
82261 + grsec_alert_fyet = 0;
82262 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
82263 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
82264 + grsec_alert_fyet++;
82265 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
82266 + grsec_alert_wtime = curr_secs;
82267 + grsec_alert_fyet++;
82268 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
82269 + return FLOODING;
82270 + }
82271 + else return FLOODING;
82272 +
82273 +set_fmt:
82274 +#endif
82275 + memset(buf, 0, PAGE_SIZE);
82276 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
82277 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
82278 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
82279 + } else if (current->signal->curr_ip) {
82280 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
82281 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
82282 + } else if (gr_acl_is_enabled()) {
82283 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
82284 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
82285 + } else {
82286 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
82287 + strcpy(buf, fmt);
82288 + }
82289 +
82290 + return NO_FLOODING;
82291 +}
82292 +
82293 +static void gr_log_middle(int audit, const char *msg, va_list ap)
82294 + __attribute__ ((format (printf, 2, 0)));
82295 +
82296 +static void gr_log_middle(int audit, const char *msg, va_list ap)
82297 +{
82298 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
82299 + unsigned int len = strlen(buf);
82300 +
82301 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
82302 +
82303 + return;
82304 +}
82305 +
82306 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
82307 + __attribute__ ((format (printf, 2, 3)));
82308 +
82309 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
82310 +{
82311 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
82312 + unsigned int len = strlen(buf);
82313 + va_list ap;
82314 +
82315 + va_start(ap, msg);
82316 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
82317 + va_end(ap);
82318 +
82319 + return;
82320 +}
82321 +
82322 +static void gr_log_end(int audit, int append_default)
82323 +{
82324 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
82325 +
82326 + if (append_default) {
82327 + unsigned int len = strlen(buf);
82328 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
82329 + }
82330 +
82331 + printk("%s\n", buf);
82332 +
82333 + return;
82334 +}
82335 +
82336 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
82337 +{
82338 + int logtype;
82339 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
82340 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
82341 + void *voidptr = NULL;
82342 + int num1 = 0, num2 = 0;
82343 + unsigned long ulong1 = 0, ulong2 = 0;
82344 + struct dentry *dentry = NULL;
82345 + struct vfsmount *mnt = NULL;
82346 + struct file *file = NULL;
82347 + struct task_struct *task = NULL;
82348 + const struct cred *cred, *pcred;
82349 + va_list ap;
82350 +
82351 + BEGIN_LOCKS(audit);
82352 + logtype = gr_log_start(audit);
82353 + if (logtype == FLOODING) {
82354 + END_LOCKS(audit);
82355 + return;
82356 + }
82357 + va_start(ap, argtypes);
82358 + switch (argtypes) {
82359 + case GR_TTYSNIFF:
82360 + task = va_arg(ap, struct task_struct *);
82361 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
82362 + break;
82363 + case GR_SYSCTL_HIDDEN:
82364 + str1 = va_arg(ap, char *);
82365 + gr_log_middle_varargs(audit, msg, result, str1);
82366 + break;
82367 + case GR_RBAC:
82368 + dentry = va_arg(ap, struct dentry *);
82369 + mnt = va_arg(ap, struct vfsmount *);
82370 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
82371 + break;
82372 + case GR_RBAC_STR:
82373 + dentry = va_arg(ap, struct dentry *);
82374 + mnt = va_arg(ap, struct vfsmount *);
82375 + str1 = va_arg(ap, char *);
82376 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
82377 + break;
82378 + case GR_STR_RBAC:
82379 + str1 = va_arg(ap, char *);
82380 + dentry = va_arg(ap, struct dentry *);
82381 + mnt = va_arg(ap, struct vfsmount *);
82382 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
82383 + break;
82384 + case GR_RBAC_MODE2:
82385 + dentry = va_arg(ap, struct dentry *);
82386 + mnt = va_arg(ap, struct vfsmount *);
82387 + str1 = va_arg(ap, char *);
82388 + str2 = va_arg(ap, char *);
82389 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
82390 + break;
82391 + case GR_RBAC_MODE3:
82392 + dentry = va_arg(ap, struct dentry *);
82393 + mnt = va_arg(ap, struct vfsmount *);
82394 + str1 = va_arg(ap, char *);
82395 + str2 = va_arg(ap, char *);
82396 + str3 = va_arg(ap, char *);
82397 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
82398 + break;
82399 + case GR_FILENAME:
82400 + dentry = va_arg(ap, struct dentry *);
82401 + mnt = va_arg(ap, struct vfsmount *);
82402 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
82403 + break;
82404 + case GR_STR_FILENAME:
82405 + str1 = va_arg(ap, char *);
82406 + dentry = va_arg(ap, struct dentry *);
82407 + mnt = va_arg(ap, struct vfsmount *);
82408 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
82409 + break;
82410 + case GR_FILENAME_STR:
82411 + dentry = va_arg(ap, struct dentry *);
82412 + mnt = va_arg(ap, struct vfsmount *);
82413 + str1 = va_arg(ap, char *);
82414 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
82415 + break;
82416 + case GR_FILENAME_TWO_INT:
82417 + dentry = va_arg(ap, struct dentry *);
82418 + mnt = va_arg(ap, struct vfsmount *);
82419 + num1 = va_arg(ap, int);
82420 + num2 = va_arg(ap, int);
82421 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
82422 + break;
82423 + case GR_FILENAME_TWO_INT_STR:
82424 + dentry = va_arg(ap, struct dentry *);
82425 + mnt = va_arg(ap, struct vfsmount *);
82426 + num1 = va_arg(ap, int);
82427 + num2 = va_arg(ap, int);
82428 + str1 = va_arg(ap, char *);
82429 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
82430 + break;
82431 + case GR_TEXTREL:
82432 + file = va_arg(ap, struct file *);
82433 + ulong1 = va_arg(ap, unsigned long);
82434 + ulong2 = va_arg(ap, unsigned long);
82435 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
82436 + break;
82437 + case GR_PTRACE:
82438 + task = va_arg(ap, struct task_struct *);
82439 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
82440 + break;
82441 + case GR_RESOURCE:
82442 + task = va_arg(ap, struct task_struct *);
82443 + cred = __task_cred(task);
82444 + pcred = __task_cred(task->real_parent);
82445 + ulong1 = va_arg(ap, unsigned long);
82446 + str1 = va_arg(ap, char *);
82447 + ulong2 = va_arg(ap, unsigned long);
82448 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
82449 + break;
82450 + case GR_CAP:
82451 + task = va_arg(ap, struct task_struct *);
82452 + cred = __task_cred(task);
82453 + pcred = __task_cred(task->real_parent);
82454 + str1 = va_arg(ap, char *);
82455 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
82456 + break;
82457 + case GR_SIG:
82458 + str1 = va_arg(ap, char *);
82459 + voidptr = va_arg(ap, void *);
82460 + gr_log_middle_varargs(audit, msg, str1, voidptr);
82461 + break;
82462 + case GR_SIG2:
82463 + task = va_arg(ap, struct task_struct *);
82464 + cred = __task_cred(task);
82465 + pcred = __task_cred(task->real_parent);
82466 + num1 = va_arg(ap, int);
82467 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
82468 + break;
82469 + case GR_CRASH1:
82470 + task = va_arg(ap, struct task_struct *);
82471 + cred = __task_cred(task);
82472 + pcred = __task_cred(task->real_parent);
82473 + ulong1 = va_arg(ap, unsigned long);
82474 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
82475 + break;
82476 + case GR_CRASH2:
82477 + task = va_arg(ap, struct task_struct *);
82478 + cred = __task_cred(task);
82479 + pcred = __task_cred(task->real_parent);
82480 + ulong1 = va_arg(ap, unsigned long);
82481 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
82482 + break;
82483 + case GR_RWXMAP:
82484 + file = va_arg(ap, struct file *);
82485 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
82486 + break;
82487 + case GR_PSACCT:
82488 + {
82489 + unsigned int wday, cday;
82490 + __u8 whr, chr;
82491 + __u8 wmin, cmin;
82492 + __u8 wsec, csec;
82493 + char cur_tty[64] = { 0 };
82494 + char parent_tty[64] = { 0 };
82495 +
82496 + task = va_arg(ap, struct task_struct *);
82497 + wday = va_arg(ap, unsigned int);
82498 + cday = va_arg(ap, unsigned int);
82499 + whr = va_arg(ap, int);
82500 + chr = va_arg(ap, int);
82501 + wmin = va_arg(ap, int);
82502 + cmin = va_arg(ap, int);
82503 + wsec = va_arg(ap, int);
82504 + csec = va_arg(ap, int);
82505 + ulong1 = va_arg(ap, unsigned long);
82506 + cred = __task_cred(task);
82507 + pcred = __task_cred(task->real_parent);
82508 +
82509 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
82510 + }
82511 + break;
82512 + default:
82513 + gr_log_middle(audit, msg, ap);
82514 + }
82515 + va_end(ap);
82516 + // these don't need DEFAULTSECARGS printed on the end
82517 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
82518 + gr_log_end(audit, 0);
82519 + else
82520 + gr_log_end(audit, 1);
82521 + END_LOCKS(audit);
82522 +}
82523 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
82524 new file mode 100644
82525 index 0000000..f536303
82526 --- /dev/null
82527 +++ b/grsecurity/grsec_mem.c
82528 @@ -0,0 +1,40 @@
82529 +#include <linux/kernel.h>
82530 +#include <linux/sched.h>
82531 +#include <linux/mm.h>
82532 +#include <linux/mman.h>
82533 +#include <linux/grinternal.h>
82534 +
82535 +void
82536 +gr_handle_ioperm(void)
82537 +{
82538 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
82539 + return;
82540 +}
82541 +
82542 +void
82543 +gr_handle_iopl(void)
82544 +{
82545 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
82546 + return;
82547 +}
82548 +
82549 +void
82550 +gr_handle_mem_readwrite(u64 from, u64 to)
82551 +{
82552 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
82553 + return;
82554 +}
82555 +
82556 +void
82557 +gr_handle_vm86(void)
82558 +{
82559 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
82560 + return;
82561 +}
82562 +
82563 +void
82564 +gr_log_badprocpid(const char *entry)
82565 +{
82566 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
82567 + return;
82568 +}
82569 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
82570 new file mode 100644
82571 index 0000000..2131422
82572 --- /dev/null
82573 +++ b/grsecurity/grsec_mount.c
82574 @@ -0,0 +1,62 @@
82575 +#include <linux/kernel.h>
82576 +#include <linux/sched.h>
82577 +#include <linux/mount.h>
82578 +#include <linux/grsecurity.h>
82579 +#include <linux/grinternal.h>
82580 +
82581 +void
82582 +gr_log_remount(const char *devname, const int retval)
82583 +{
82584 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82585 + if (grsec_enable_mount && (retval >= 0))
82586 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
82587 +#endif
82588 + return;
82589 +}
82590 +
82591 +void
82592 +gr_log_unmount(const char *devname, const int retval)
82593 +{
82594 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82595 + if (grsec_enable_mount && (retval >= 0))
82596 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
82597 +#endif
82598 + return;
82599 +}
82600 +
82601 +void
82602 +gr_log_mount(const char *from, const char *to, const int retval)
82603 +{
82604 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82605 + if (grsec_enable_mount && (retval >= 0))
82606 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
82607 +#endif
82608 + return;
82609 +}
82610 +
82611 +int
82612 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
82613 +{
82614 +#ifdef CONFIG_GRKERNSEC_ROFS
82615 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
82616 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
82617 + return -EPERM;
82618 + } else
82619 + return 0;
82620 +#endif
82621 + return 0;
82622 +}
82623 +
82624 +int
82625 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
82626 +{
82627 +#ifdef CONFIG_GRKERNSEC_ROFS
82628 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
82629 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
82630 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
82631 + return -EPERM;
82632 + } else
82633 + return 0;
82634 +#endif
82635 + return 0;
82636 +}
82637 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
82638 new file mode 100644
82639 index 0000000..a3b12a0
82640 --- /dev/null
82641 +++ b/grsecurity/grsec_pax.c
82642 @@ -0,0 +1,36 @@
82643 +#include <linux/kernel.h>
82644 +#include <linux/sched.h>
82645 +#include <linux/mm.h>
82646 +#include <linux/file.h>
82647 +#include <linux/grinternal.h>
82648 +#include <linux/grsecurity.h>
82649 +
82650 +void
82651 +gr_log_textrel(struct vm_area_struct * vma)
82652 +{
82653 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
82654 + if (grsec_enable_audit_textrel)
82655 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
82656 +#endif
82657 + return;
82658 +}
82659 +
82660 +void
82661 +gr_log_rwxmmap(struct file *file)
82662 +{
82663 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
82664 + if (grsec_enable_log_rwxmaps)
82665 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
82666 +#endif
82667 + return;
82668 +}
82669 +
82670 +void
82671 +gr_log_rwxmprotect(struct file *file)
82672 +{
82673 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
82674 + if (grsec_enable_log_rwxmaps)
82675 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
82676 +#endif
82677 + return;
82678 +}
82679 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
82680 new file mode 100644
82681 index 0000000..78f8733
82682 --- /dev/null
82683 +++ b/grsecurity/grsec_ptrace.c
82684 @@ -0,0 +1,30 @@
82685 +#include <linux/kernel.h>
82686 +#include <linux/sched.h>
82687 +#include <linux/grinternal.h>
82688 +#include <linux/security.h>
82689 +
82690 +void
82691 +gr_audit_ptrace(struct task_struct *task)
82692 +{
82693 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
82694 + if (grsec_enable_audit_ptrace)
82695 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
82696 +#endif
82697 + return;
82698 +}
82699 +
82700 +int
82701 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
82702 +{
82703 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
82704 + const struct dentry *dentry = file->f_path.dentry;
82705 + const struct vfsmount *mnt = file->f_path.mnt;
82706 +
82707 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
82708 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
82709 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
82710 + return -EACCES;
82711 + }
82712 +#endif
82713 + return 0;
82714 +}
82715 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
82716 new file mode 100644
82717 index 0000000..c648492
82718 --- /dev/null
82719 +++ b/grsecurity/grsec_sig.c
82720 @@ -0,0 +1,206 @@
82721 +#include <linux/kernel.h>
82722 +#include <linux/sched.h>
82723 +#include <linux/delay.h>
82724 +#include <linux/grsecurity.h>
82725 +#include <linux/grinternal.h>
82726 +#include <linux/hardirq.h>
82727 +
82728 +char *signames[] = {
82729 + [SIGSEGV] = "Segmentation fault",
82730 + [SIGILL] = "Illegal instruction",
82731 + [SIGABRT] = "Abort",
82732 + [SIGBUS] = "Invalid alignment/Bus error"
82733 +};
82734 +
82735 +void
82736 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
82737 +{
82738 +#ifdef CONFIG_GRKERNSEC_SIGNAL
82739 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
82740 + (sig == SIGABRT) || (sig == SIGBUS))) {
82741 + if (t->pid == current->pid) {
82742 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
82743 + } else {
82744 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
82745 + }
82746 + }
82747 +#endif
82748 + return;
82749 +}
82750 +
82751 +int
82752 +gr_handle_signal(const struct task_struct *p, const int sig)
82753 +{
82754 +#ifdef CONFIG_GRKERNSEC
82755 + /* ignore the 0 signal for protected task checks */
82756 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
82757 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
82758 + return -EPERM;
82759 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
82760 + return -EPERM;
82761 + }
82762 +#endif
82763 + return 0;
82764 +}
82765 +
82766 +#ifdef CONFIG_GRKERNSEC
82767 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
82768 +
82769 +int gr_fake_force_sig(int sig, struct task_struct *t)
82770 +{
82771 + unsigned long int flags;
82772 + int ret, blocked, ignored;
82773 + struct k_sigaction *action;
82774 +
82775 + spin_lock_irqsave(&t->sighand->siglock, flags);
82776 + action = &t->sighand->action[sig-1];
82777 + ignored = action->sa.sa_handler == SIG_IGN;
82778 + blocked = sigismember(&t->blocked, sig);
82779 + if (blocked || ignored) {
82780 + action->sa.sa_handler = SIG_DFL;
82781 + if (blocked) {
82782 + sigdelset(&t->blocked, sig);
82783 + recalc_sigpending_and_wake(t);
82784 + }
82785 + }
82786 + if (action->sa.sa_handler == SIG_DFL)
82787 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
82788 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
82789 +
82790 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
82791 +
82792 + return ret;
82793 +}
82794 +#endif
82795 +
82796 +#ifdef CONFIG_GRKERNSEC_BRUTE
82797 +#define GR_USER_BAN_TIME (15 * 60)
82798 +
82799 +static int __get_dumpable(unsigned long mm_flags)
82800 +{
82801 + int ret;
82802 +
82803 + ret = mm_flags & MMF_DUMPABLE_MASK;
82804 + return (ret >= 2) ? 2 : ret;
82805 +}
82806 +#endif
82807 +
82808 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
82809 +{
82810 +#ifdef CONFIG_GRKERNSEC_BRUTE
82811 + uid_t uid = 0;
82812 +
82813 + if (!grsec_enable_brute)
82814 + return;
82815 +
82816 + rcu_read_lock();
82817 + read_lock(&tasklist_lock);
82818 + read_lock(&grsec_exec_file_lock);
82819 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
82820 + p->real_parent->brute = 1;
82821 + else {
82822 + const struct cred *cred = __task_cred(p), *cred2;
82823 + struct task_struct *tsk, *tsk2;
82824 +
82825 + if (!__get_dumpable(mm_flags) && cred->uid) {
82826 + struct user_struct *user;
82827 +
82828 + uid = cred->uid;
82829 +
82830 + /* this is put upon execution past expiration */
82831 + user = find_user(uid);
82832 + if (user == NULL)
82833 + goto unlock;
82834 + user->banned = 1;
82835 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
82836 + if (user->ban_expires == ~0UL)
82837 + user->ban_expires--;
82838 +
82839 + do_each_thread(tsk2, tsk) {
82840 + cred2 = __task_cred(tsk);
82841 + if (tsk != p && cred2->uid == uid)
82842 + gr_fake_force_sig(SIGKILL, tsk);
82843 + } while_each_thread(tsk2, tsk);
82844 + }
82845 + }
82846 +unlock:
82847 + read_unlock(&grsec_exec_file_lock);
82848 + read_unlock(&tasklist_lock);
82849 + rcu_read_unlock();
82850 +
82851 + if (uid)
82852 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
82853 +#endif
82854 + return;
82855 +}
82856 +
82857 +void gr_handle_brute_check(void)
82858 +{
82859 +#ifdef CONFIG_GRKERNSEC_BRUTE
82860 + if (current->brute)
82861 + msleep(30 * 1000);
82862 +#endif
82863 + return;
82864 +}
82865 +
82866 +void gr_handle_kernel_exploit(void)
82867 +{
82868 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
82869 + const struct cred *cred;
82870 + struct task_struct *tsk, *tsk2;
82871 + struct user_struct *user;
82872 + uid_t uid;
82873 +
82874 + if (in_irq() || in_serving_softirq() || in_nmi())
82875 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
82876 +
82877 + uid = current_uid();
82878 +
82879 + if (uid == 0)
82880 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
82881 + else {
82882 + /* kill all the processes of this user, hold a reference
82883 + to their creds struct, and prevent them from creating
82884 + another process until system reset
82885 + */
82886 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
82887 + /* we intentionally leak this ref */
82888 + user = get_uid(current->cred->user);
82889 + if (user) {
82890 + user->banned = 1;
82891 + user->ban_expires = ~0UL;
82892 + }
82893 +
82894 + read_lock(&tasklist_lock);
82895 + do_each_thread(tsk2, tsk) {
82896 + cred = __task_cred(tsk);
82897 + if (cred->uid == uid)
82898 + gr_fake_force_sig(SIGKILL, tsk);
82899 + } while_each_thread(tsk2, tsk);
82900 + read_unlock(&tasklist_lock);
82901 + }
82902 +#endif
82903 +}
82904 +
82905 +int __gr_process_user_ban(struct user_struct *user)
82906 +{
82907 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
82908 + if (unlikely(user->banned)) {
82909 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
82910 + user->banned = 0;
82911 + user->ban_expires = 0;
82912 + free_uid(user);
82913 + } else
82914 + return -EPERM;
82915 + }
82916 +#endif
82917 + return 0;
82918 +}
82919 +
82920 +int gr_process_user_ban(void)
82921 +{
82922 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
82923 + return __gr_process_user_ban(current->cred->user);
82924 +#endif
82925 + return 0;
82926 +}
82927 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
82928 new file mode 100644
82929 index 0000000..7512ea9
82930 --- /dev/null
82931 +++ b/grsecurity/grsec_sock.c
82932 @@ -0,0 +1,275 @@
82933 +#include <linux/kernel.h>
82934 +#include <linux/module.h>
82935 +#include <linux/sched.h>
82936 +#include <linux/file.h>
82937 +#include <linux/net.h>
82938 +#include <linux/in.h>
82939 +#include <linux/ip.h>
82940 +#include <net/sock.h>
82941 +#include <net/inet_sock.h>
82942 +#include <linux/grsecurity.h>
82943 +#include <linux/grinternal.h>
82944 +#include <linux/gracl.h>
82945 +
82946 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
82947 +EXPORT_SYMBOL(gr_cap_rtnetlink);
82948 +
82949 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
82950 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
82951 +
82952 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
82953 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
82954 +
82955 +#ifdef CONFIG_UNIX_MODULE
82956 +EXPORT_SYMBOL(gr_acl_handle_unix);
82957 +EXPORT_SYMBOL(gr_acl_handle_mknod);
82958 +EXPORT_SYMBOL(gr_handle_chroot_unix);
82959 +EXPORT_SYMBOL(gr_handle_create);
82960 +#endif
82961 +
82962 +#ifdef CONFIG_GRKERNSEC
82963 +#define gr_conn_table_size 32749
82964 +struct conn_table_entry {
82965 + struct conn_table_entry *next;
82966 + struct signal_struct *sig;
82967 +};
82968 +
82969 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
82970 +DEFINE_SPINLOCK(gr_conn_table_lock);
82971 +
82972 +extern const char * gr_socktype_to_name(unsigned char type);
82973 +extern const char * gr_proto_to_name(unsigned char proto);
82974 +extern const char * gr_sockfamily_to_name(unsigned char family);
82975 +
82976 +static __inline__ int
82977 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
82978 +{
82979 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
82980 +}
82981 +
82982 +static __inline__ int
82983 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
82984 + __u16 sport, __u16 dport)
82985 +{
82986 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
82987 + sig->gr_sport == sport && sig->gr_dport == dport))
82988 + return 1;
82989 + else
82990 + return 0;
82991 +}
82992 +
82993 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
82994 +{
82995 + struct conn_table_entry **match;
82996 + unsigned int index;
82997 +
82998 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
82999 + sig->gr_sport, sig->gr_dport,
83000 + gr_conn_table_size);
83001 +
83002 + newent->sig = sig;
83003 +
83004 + match = &gr_conn_table[index];
83005 + newent->next = *match;
83006 + *match = newent;
83007 +
83008 + return;
83009 +}
83010 +
83011 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
83012 +{
83013 + struct conn_table_entry *match, *last = NULL;
83014 + unsigned int index;
83015 +
83016 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
83017 + sig->gr_sport, sig->gr_dport,
83018 + gr_conn_table_size);
83019 +
83020 + match = gr_conn_table[index];
83021 + while (match && !conn_match(match->sig,
83022 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
83023 + sig->gr_dport)) {
83024 + last = match;
83025 + match = match->next;
83026 + }
83027 +
83028 + if (match) {
83029 + if (last)
83030 + last->next = match->next;
83031 + else
83032 + gr_conn_table[index] = NULL;
83033 + kfree(match);
83034 + }
83035 +
83036 + return;
83037 +}
83038 +
83039 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
83040 + __u16 sport, __u16 dport)
83041 +{
83042 + struct conn_table_entry *match;
83043 + unsigned int index;
83044 +
83045 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
83046 +
83047 + match = gr_conn_table[index];
83048 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
83049 + match = match->next;
83050 +
83051 + if (match)
83052 + return match->sig;
83053 + else
83054 + return NULL;
83055 +}
83056 +
83057 +#endif
83058 +
83059 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
83060 +{
83061 +#ifdef CONFIG_GRKERNSEC
83062 + struct signal_struct *sig = task->signal;
83063 + struct conn_table_entry *newent;
83064 +
83065 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
83066 + if (newent == NULL)
83067 + return;
83068 + /* no bh lock needed since we are called with bh disabled */
83069 + spin_lock(&gr_conn_table_lock);
83070 + gr_del_task_from_ip_table_nolock(sig);
83071 + sig->gr_saddr = inet->rcv_saddr;
83072 + sig->gr_daddr = inet->daddr;
83073 + sig->gr_sport = inet->sport;
83074 + sig->gr_dport = inet->dport;
83075 + gr_add_to_task_ip_table_nolock(sig, newent);
83076 + spin_unlock(&gr_conn_table_lock);
83077 +#endif
83078 + return;
83079 +}
83080 +
83081 +void gr_del_task_from_ip_table(struct task_struct *task)
83082 +{
83083 +#ifdef CONFIG_GRKERNSEC
83084 + spin_lock_bh(&gr_conn_table_lock);
83085 + gr_del_task_from_ip_table_nolock(task->signal);
83086 + spin_unlock_bh(&gr_conn_table_lock);
83087 +#endif
83088 + return;
83089 +}
83090 +
83091 +void
83092 +gr_attach_curr_ip(const struct sock *sk)
83093 +{
83094 +#ifdef CONFIG_GRKERNSEC
83095 + struct signal_struct *p, *set;
83096 + const struct inet_sock *inet = inet_sk(sk);
83097 +
83098 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
83099 + return;
83100 +
83101 + set = current->signal;
83102 +
83103 + spin_lock_bh(&gr_conn_table_lock);
83104 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
83105 + inet->dport, inet->sport);
83106 + if (unlikely(p != NULL)) {
83107 + set->curr_ip = p->curr_ip;
83108 + set->used_accept = 1;
83109 + gr_del_task_from_ip_table_nolock(p);
83110 + spin_unlock_bh(&gr_conn_table_lock);
83111 + return;
83112 + }
83113 + spin_unlock_bh(&gr_conn_table_lock);
83114 +
83115 + set->curr_ip = inet->daddr;
83116 + set->used_accept = 1;
83117 +#endif
83118 + return;
83119 +}
83120 +
83121 +int
83122 +gr_handle_sock_all(const int family, const int type, const int protocol)
83123 +{
83124 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
83125 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
83126 + (family != AF_UNIX)) {
83127 + if (family == AF_INET)
83128 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
83129 + else
83130 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
83131 + return -EACCES;
83132 + }
83133 +#endif
83134 + return 0;
83135 +}
83136 +
83137 +int
83138 +gr_handle_sock_server(const struct sockaddr *sck)
83139 +{
83140 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
83141 + if (grsec_enable_socket_server &&
83142 + in_group_p(grsec_socket_server_gid) &&
83143 + sck && (sck->sa_family != AF_UNIX) &&
83144 + (sck->sa_family != AF_LOCAL)) {
83145 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
83146 + return -EACCES;
83147 + }
83148 +#endif
83149 + return 0;
83150 +}
83151 +
83152 +int
83153 +gr_handle_sock_server_other(const struct sock *sck)
83154 +{
83155 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
83156 + if (grsec_enable_socket_server &&
83157 + in_group_p(grsec_socket_server_gid) &&
83158 + sck && (sck->sk_family != AF_UNIX) &&
83159 + (sck->sk_family != AF_LOCAL)) {
83160 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
83161 + return -EACCES;
83162 + }
83163 +#endif
83164 + return 0;
83165 +}
83166 +
83167 +int
83168 +gr_handle_sock_client(const struct sockaddr *sck)
83169 +{
83170 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
83171 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
83172 + sck && (sck->sa_family != AF_UNIX) &&
83173 + (sck->sa_family != AF_LOCAL)) {
83174 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
83175 + return -EACCES;
83176 + }
83177 +#endif
83178 + return 0;
83179 +}
83180 +
83181 +kernel_cap_t
83182 +gr_cap_rtnetlink(struct sock *sock)
83183 +{
83184 +#ifdef CONFIG_GRKERNSEC
83185 + if (!gr_acl_is_enabled())
83186 + return current_cap();
83187 + else if (sock->sk_protocol == NETLINK_ISCSI &&
83188 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
83189 + gr_is_capable(CAP_SYS_ADMIN))
83190 + return current_cap();
83191 + else if (sock->sk_protocol == NETLINK_AUDIT &&
83192 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
83193 + gr_is_capable(CAP_AUDIT_WRITE) &&
83194 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
83195 + gr_is_capable(CAP_AUDIT_CONTROL))
83196 + return current_cap();
83197 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
83198 + ((sock->sk_protocol == NETLINK_ROUTE) ?
83199 + gr_is_capable_nolog(CAP_NET_ADMIN) :
83200 + gr_is_capable(CAP_NET_ADMIN)))
83201 + return current_cap();
83202 + else
83203 + return __cap_empty_set;
83204 +#else
83205 + return current_cap();
83206 +#endif
83207 +}
83208 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
83209 new file mode 100644
83210 index 0000000..31f3258
83211 --- /dev/null
83212 +++ b/grsecurity/grsec_sysctl.c
83213 @@ -0,0 +1,499 @@
83214 +#include <linux/kernel.h>
83215 +#include <linux/sched.h>
83216 +#include <linux/sysctl.h>
83217 +#include <linux/grsecurity.h>
83218 +#include <linux/grinternal.h>
83219 +
83220 +int
83221 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
83222 +{
83223 +#ifdef CONFIG_GRKERNSEC_SYSCTL
83224 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
83225 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
83226 + return -EACCES;
83227 + }
83228 +#endif
83229 + return 0;
83230 +}
83231 +
83232 +#ifdef CONFIG_GRKERNSEC_ROFS
83233 +static int __maybe_unused one = 1;
83234 +#endif
83235 +
83236 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
83237 +ctl_table grsecurity_table[] = {
83238 +#ifdef CONFIG_GRKERNSEC_SYSCTL
83239 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
83240 +#ifdef CONFIG_GRKERNSEC_IO
83241 + {
83242 + .ctl_name = CTL_UNNUMBERED,
83243 + .procname = "disable_priv_io",
83244 + .data = &grsec_disable_privio,
83245 + .maxlen = sizeof(int),
83246 + .mode = 0600,
83247 + .proc_handler = &proc_dointvec,
83248 + },
83249 +#endif
83250 +#endif
83251 +#ifdef CONFIG_GRKERNSEC_LINK
83252 + {
83253 + .ctl_name = CTL_UNNUMBERED,
83254 + .procname = "linking_restrictions",
83255 + .data = &grsec_enable_link,
83256 + .maxlen = sizeof(int),
83257 + .mode = 0600,
83258 + .proc_handler = &proc_dointvec,
83259 + },
83260 +#endif
83261 +#ifdef CONFIG_GRKERNSEC_BRUTE
83262 + {
83263 + .ctl_name = CTL_UNNUMBERED,
83264 + .procname = "deter_bruteforce",
83265 + .data = &grsec_enable_brute,
83266 + .maxlen = sizeof(int),
83267 + .mode = 0600,
83268 + .proc_handler = &proc_dointvec,
83269 + },
83270 +#endif
83271 +#ifdef CONFIG_GRKERNSEC_FIFO
83272 + {
83273 + .ctl_name = CTL_UNNUMBERED,
83274 + .procname = "fifo_restrictions",
83275 + .data = &grsec_enable_fifo,
83276 + .maxlen = sizeof(int),
83277 + .mode = 0600,
83278 + .proc_handler = &proc_dointvec,
83279 + },
83280 +#endif
83281 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
83282 + {
83283 + .ctl_name = CTL_UNNUMBERED,
83284 + .procname = "ptrace_readexec",
83285 + .data = &grsec_enable_ptrace_readexec,
83286 + .maxlen = sizeof(int),
83287 + .mode = 0600,
83288 + .proc_handler = &proc_dointvec,
83289 + },
83290 +#endif
83291 +#ifdef CONFIG_GRKERNSEC_SETXID
83292 + {
83293 + .ctl_name = CTL_UNNUMBERED,
83294 + .procname = "consistent_setxid",
83295 + .data = &grsec_enable_setxid,
83296 + .maxlen = sizeof(int),
83297 + .mode = 0600,
83298 + .proc_handler = &proc_dointvec,
83299 + },
83300 +#endif
83301 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
83302 + {
83303 + .ctl_name = CTL_UNNUMBERED,
83304 + .procname = "ip_blackhole",
83305 + .data = &grsec_enable_blackhole,
83306 + .maxlen = sizeof(int),
83307 + .mode = 0600,
83308 + .proc_handler = &proc_dointvec,
83309 + },
83310 + {
83311 + .ctl_name = CTL_UNNUMBERED,
83312 + .procname = "lastack_retries",
83313 + .data = &grsec_lastack_retries,
83314 + .maxlen = sizeof(int),
83315 + .mode = 0600,
83316 + .proc_handler = &proc_dointvec,
83317 + },
83318 +#endif
83319 +#ifdef CONFIG_GRKERNSEC_EXECLOG
83320 + {
83321 + .ctl_name = CTL_UNNUMBERED,
83322 + .procname = "exec_logging",
83323 + .data = &grsec_enable_execlog,
83324 + .maxlen = sizeof(int),
83325 + .mode = 0600,
83326 + .proc_handler = &proc_dointvec,
83327 + },
83328 +#endif
83329 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
83330 + {
83331 + .ctl_name = CTL_UNNUMBERED,
83332 + .procname = "rwxmap_logging",
83333 + .data = &grsec_enable_log_rwxmaps,
83334 + .maxlen = sizeof(int),
83335 + .mode = 0600,
83336 + .proc_handler = &proc_dointvec,
83337 + },
83338 +#endif
83339 +#ifdef CONFIG_GRKERNSEC_SIGNAL
83340 + {
83341 + .ctl_name = CTL_UNNUMBERED,
83342 + .procname = "signal_logging",
83343 + .data = &grsec_enable_signal,
83344 + .maxlen = sizeof(int),
83345 + .mode = 0600,
83346 + .proc_handler = &proc_dointvec,
83347 + },
83348 +#endif
83349 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
83350 + {
83351 + .ctl_name = CTL_UNNUMBERED,
83352 + .procname = "forkfail_logging",
83353 + .data = &grsec_enable_forkfail,
83354 + .maxlen = sizeof(int),
83355 + .mode = 0600,
83356 + .proc_handler = &proc_dointvec,
83357 + },
83358 +#endif
83359 +#ifdef CONFIG_GRKERNSEC_TIME
83360 + {
83361 + .ctl_name = CTL_UNNUMBERED,
83362 + .procname = "timechange_logging",
83363 + .data = &grsec_enable_time,
83364 + .maxlen = sizeof(int),
83365 + .mode = 0600,
83366 + .proc_handler = &proc_dointvec,
83367 + },
83368 +#endif
83369 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
83370 + {
83371 + .ctl_name = CTL_UNNUMBERED,
83372 + .procname = "chroot_deny_shmat",
83373 + .data = &grsec_enable_chroot_shmat,
83374 + .maxlen = sizeof(int),
83375 + .mode = 0600,
83376 + .proc_handler = &proc_dointvec,
83377 + },
83378 +#endif
83379 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
83380 + {
83381 + .ctl_name = CTL_UNNUMBERED,
83382 + .procname = "chroot_deny_unix",
83383 + .data = &grsec_enable_chroot_unix,
83384 + .maxlen = sizeof(int),
83385 + .mode = 0600,
83386 + .proc_handler = &proc_dointvec,
83387 + },
83388 +#endif
83389 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
83390 + {
83391 + .ctl_name = CTL_UNNUMBERED,
83392 + .procname = "chroot_deny_mount",
83393 + .data = &grsec_enable_chroot_mount,
83394 + .maxlen = sizeof(int),
83395 + .mode = 0600,
83396 + .proc_handler = &proc_dointvec,
83397 + },
83398 +#endif
83399 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
83400 + {
83401 + .ctl_name = CTL_UNNUMBERED,
83402 + .procname = "chroot_deny_fchdir",
83403 + .data = &grsec_enable_chroot_fchdir,
83404 + .maxlen = sizeof(int),
83405 + .mode = 0600,
83406 + .proc_handler = &proc_dointvec,
83407 + },
83408 +#endif
83409 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
83410 + {
83411 + .ctl_name = CTL_UNNUMBERED,
83412 + .procname = "chroot_deny_chroot",
83413 + .data = &grsec_enable_chroot_double,
83414 + .maxlen = sizeof(int),
83415 + .mode = 0600,
83416 + .proc_handler = &proc_dointvec,
83417 + },
83418 +#endif
83419 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
83420 + {
83421 + .ctl_name = CTL_UNNUMBERED,
83422 + .procname = "chroot_deny_pivot",
83423 + .data = &grsec_enable_chroot_pivot,
83424 + .maxlen = sizeof(int),
83425 + .mode = 0600,
83426 + .proc_handler = &proc_dointvec,
83427 + },
83428 +#endif
83429 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
83430 + {
83431 + .ctl_name = CTL_UNNUMBERED,
83432 + .procname = "chroot_enforce_chdir",
83433 + .data = &grsec_enable_chroot_chdir,
83434 + .maxlen = sizeof(int),
83435 + .mode = 0600,
83436 + .proc_handler = &proc_dointvec,
83437 + },
83438 +#endif
83439 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
83440 + {
83441 + .ctl_name = CTL_UNNUMBERED,
83442 + .procname = "chroot_deny_chmod",
83443 + .data = &grsec_enable_chroot_chmod,
83444 + .maxlen = sizeof(int),
83445 + .mode = 0600,
83446 + .proc_handler = &proc_dointvec,
83447 + },
83448 +#endif
83449 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
83450 + {
83451 + .ctl_name = CTL_UNNUMBERED,
83452 + .procname = "chroot_deny_mknod",
83453 + .data = &grsec_enable_chroot_mknod,
83454 + .maxlen = sizeof(int),
83455 + .mode = 0600,
83456 + .proc_handler = &proc_dointvec,
83457 + },
83458 +#endif
83459 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
83460 + {
83461 + .ctl_name = CTL_UNNUMBERED,
83462 + .procname = "chroot_restrict_nice",
83463 + .data = &grsec_enable_chroot_nice,
83464 + .maxlen = sizeof(int),
83465 + .mode = 0600,
83466 + .proc_handler = &proc_dointvec,
83467 + },
83468 +#endif
83469 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
83470 + {
83471 + .ctl_name = CTL_UNNUMBERED,
83472 + .procname = "chroot_execlog",
83473 + .data = &grsec_enable_chroot_execlog,
83474 + .maxlen = sizeof(int),
83475 + .mode = 0600,
83476 + .proc_handler = &proc_dointvec,
83477 + },
83478 +#endif
83479 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
83480 + {
83481 + .ctl_name = CTL_UNNUMBERED,
83482 + .procname = "chroot_caps",
83483 + .data = &grsec_enable_chroot_caps,
83484 + .maxlen = sizeof(int),
83485 + .mode = 0600,
83486 + .proc_handler = &proc_dointvec,
83487 + },
83488 +#endif
83489 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
83490 + {
83491 + .ctl_name = CTL_UNNUMBERED,
83492 + .procname = "chroot_deny_sysctl",
83493 + .data = &grsec_enable_chroot_sysctl,
83494 + .maxlen = sizeof(int),
83495 + .mode = 0600,
83496 + .proc_handler = &proc_dointvec,
83497 + },
83498 +#endif
83499 +#ifdef CONFIG_GRKERNSEC_TPE
83500 + {
83501 + .ctl_name = CTL_UNNUMBERED,
83502 + .procname = "tpe",
83503 + .data = &grsec_enable_tpe,
83504 + .maxlen = sizeof(int),
83505 + .mode = 0600,
83506 + .proc_handler = &proc_dointvec,
83507 + },
83508 + {
83509 + .ctl_name = CTL_UNNUMBERED,
83510 + .procname = "tpe_gid",
83511 + .data = &grsec_tpe_gid,
83512 + .maxlen = sizeof(int),
83513 + .mode = 0600,
83514 + .proc_handler = &proc_dointvec,
83515 + },
83516 +#endif
83517 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
83518 + {
83519 + .ctl_name = CTL_UNNUMBERED,
83520 + .procname = "tpe_invert",
83521 + .data = &grsec_enable_tpe_invert,
83522 + .maxlen = sizeof(int),
83523 + .mode = 0600,
83524 + .proc_handler = &proc_dointvec,
83525 + },
83526 +#endif
83527 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
83528 + {
83529 + .ctl_name = CTL_UNNUMBERED,
83530 + .procname = "tpe_restrict_all",
83531 + .data = &grsec_enable_tpe_all,
83532 + .maxlen = sizeof(int),
83533 + .mode = 0600,
83534 + .proc_handler = &proc_dointvec,
83535 + },
83536 +#endif
83537 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
83538 + {
83539 + .ctl_name = CTL_UNNUMBERED,
83540 + .procname = "socket_all",
83541 + .data = &grsec_enable_socket_all,
83542 + .maxlen = sizeof(int),
83543 + .mode = 0600,
83544 + .proc_handler = &proc_dointvec,
83545 + },
83546 + {
83547 + .ctl_name = CTL_UNNUMBERED,
83548 + .procname = "socket_all_gid",
83549 + .data = &grsec_socket_all_gid,
83550 + .maxlen = sizeof(int),
83551 + .mode = 0600,
83552 + .proc_handler = &proc_dointvec,
83553 + },
83554 +#endif
83555 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
83556 + {
83557 + .ctl_name = CTL_UNNUMBERED,
83558 + .procname = "socket_client",
83559 + .data = &grsec_enable_socket_client,
83560 + .maxlen = sizeof(int),
83561 + .mode = 0600,
83562 + .proc_handler = &proc_dointvec,
83563 + },
83564 + {
83565 + .ctl_name = CTL_UNNUMBERED,
83566 + .procname = "socket_client_gid",
83567 + .data = &grsec_socket_client_gid,
83568 + .maxlen = sizeof(int),
83569 + .mode = 0600,
83570 + .proc_handler = &proc_dointvec,
83571 + },
83572 +#endif
83573 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
83574 + {
83575 + .ctl_name = CTL_UNNUMBERED,
83576 + .procname = "socket_server",
83577 + .data = &grsec_enable_socket_server,
83578 + .maxlen = sizeof(int),
83579 + .mode = 0600,
83580 + .proc_handler = &proc_dointvec,
83581 + },
83582 + {
83583 + .ctl_name = CTL_UNNUMBERED,
83584 + .procname = "socket_server_gid",
83585 + .data = &grsec_socket_server_gid,
83586 + .maxlen = sizeof(int),
83587 + .mode = 0600,
83588 + .proc_handler = &proc_dointvec,
83589 + },
83590 +#endif
83591 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
83592 + {
83593 + .ctl_name = CTL_UNNUMBERED,
83594 + .procname = "audit_group",
83595 + .data = &grsec_enable_group,
83596 + .maxlen = sizeof(int),
83597 + .mode = 0600,
83598 + .proc_handler = &proc_dointvec,
83599 + },
83600 + {
83601 + .ctl_name = CTL_UNNUMBERED,
83602 + .procname = "audit_gid",
83603 + .data = &grsec_audit_gid,
83604 + .maxlen = sizeof(int),
83605 + .mode = 0600,
83606 + .proc_handler = &proc_dointvec,
83607 + },
83608 +#endif
83609 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
83610 + {
83611 + .ctl_name = CTL_UNNUMBERED,
83612 + .procname = "audit_chdir",
83613 + .data = &grsec_enable_chdir,
83614 + .maxlen = sizeof(int),
83615 + .mode = 0600,
83616 + .proc_handler = &proc_dointvec,
83617 + },
83618 +#endif
83619 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
83620 + {
83621 + .ctl_name = CTL_UNNUMBERED,
83622 + .procname = "audit_mount",
83623 + .data = &grsec_enable_mount,
83624 + .maxlen = sizeof(int),
83625 + .mode = 0600,
83626 + .proc_handler = &proc_dointvec,
83627 + },
83628 +#endif
83629 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
83630 + {
83631 + .ctl_name = CTL_UNNUMBERED,
83632 + .procname = "audit_textrel",
83633 + .data = &grsec_enable_audit_textrel,
83634 + .maxlen = sizeof(int),
83635 + .mode = 0600,
83636 + .proc_handler = &proc_dointvec,
83637 + },
83638 +#endif
83639 +#ifdef CONFIG_GRKERNSEC_DMESG
83640 + {
83641 + .ctl_name = CTL_UNNUMBERED,
83642 + .procname = "dmesg",
83643 + .data = &grsec_enable_dmesg,
83644 + .maxlen = sizeof(int),
83645 + .mode = 0600,
83646 + .proc_handler = &proc_dointvec,
83647 + },
83648 +#endif
83649 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
83650 + {
83651 + .ctl_name = CTL_UNNUMBERED,
83652 + .procname = "chroot_findtask",
83653 + .data = &grsec_enable_chroot_findtask,
83654 + .maxlen = sizeof(int),
83655 + .mode = 0600,
83656 + .proc_handler = &proc_dointvec,
83657 + },
83658 +#endif
83659 +#ifdef CONFIG_GRKERNSEC_RESLOG
83660 + {
83661 + .ctl_name = CTL_UNNUMBERED,
83662 + .procname = "resource_logging",
83663 + .data = &grsec_resource_logging,
83664 + .maxlen = sizeof(int),
83665 + .mode = 0600,
83666 + .proc_handler = &proc_dointvec,
83667 + },
83668 +#endif
83669 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
83670 + {
83671 + .ctl_name = CTL_UNNUMBERED,
83672 + .procname = "audit_ptrace",
83673 + .data = &grsec_enable_audit_ptrace,
83674 + .maxlen = sizeof(int),
83675 + .mode = 0600,
83676 + .proc_handler = &proc_dointvec,
83677 + },
83678 +#endif
83679 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
83680 + {
83681 + .ctl_name = CTL_UNNUMBERED,
83682 + .procname = "harden_ptrace",
83683 + .data = &grsec_enable_harden_ptrace,
83684 + .maxlen = sizeof(int),
83685 + .mode = 0600,
83686 + .proc_handler = &proc_dointvec,
83687 + },
83688 +#endif
83689 + {
83690 + .ctl_name = CTL_UNNUMBERED,
83691 + .procname = "grsec_lock",
83692 + .data = &grsec_lock,
83693 + .maxlen = sizeof(int),
83694 + .mode = 0600,
83695 + .proc_handler = &proc_dointvec,
83696 + },
83697 +#endif
83698 +#ifdef CONFIG_GRKERNSEC_ROFS
83699 + {
83700 + .ctl_name = CTL_UNNUMBERED,
83701 + .procname = "romount_protect",
83702 + .data = &grsec_enable_rofs,
83703 + .maxlen = sizeof(int),
83704 + .mode = 0600,
83705 + .proc_handler = &proc_dointvec_minmax,
83706 + .extra1 = &one,
83707 + .extra2 = &one,
83708 + },
83709 +#endif
83710 + { .ctl_name = 0 }
83711 +};
83712 +#endif
83713 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
83714 new file mode 100644
83715 index 0000000..0dc13c3
83716 --- /dev/null
83717 +++ b/grsecurity/grsec_time.c
83718 @@ -0,0 +1,16 @@
83719 +#include <linux/kernel.h>
83720 +#include <linux/sched.h>
83721 +#include <linux/grinternal.h>
83722 +#include <linux/module.h>
83723 +
83724 +void
83725 +gr_log_timechange(void)
83726 +{
83727 +#ifdef CONFIG_GRKERNSEC_TIME
83728 + if (grsec_enable_time)
83729 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
83730 +#endif
83731 + return;
83732 +}
83733 +
83734 +EXPORT_SYMBOL(gr_log_timechange);
83735 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
83736 new file mode 100644
83737 index 0000000..07e0dc0
83738 --- /dev/null
83739 +++ b/grsecurity/grsec_tpe.c
83740 @@ -0,0 +1,73 @@
83741 +#include <linux/kernel.h>
83742 +#include <linux/sched.h>
83743 +#include <linux/file.h>
83744 +#include <linux/fs.h>
83745 +#include <linux/grinternal.h>
83746 +
83747 +extern int gr_acl_tpe_check(void);
83748 +
83749 +int
83750 +gr_tpe_allow(const struct file *file)
83751 +{
83752 +#ifdef CONFIG_GRKERNSEC
83753 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
83754 + const struct cred *cred = current_cred();
83755 + char *msg = NULL;
83756 + char *msg2 = NULL;
83757 +
83758 + // never restrict root
83759 + if (!cred->uid)
83760 + return 1;
83761 +
83762 + if (grsec_enable_tpe) {
83763 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
83764 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
83765 + msg = "not being in trusted group";
83766 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
83767 + msg = "being in untrusted group";
83768 +#else
83769 + if (in_group_p(grsec_tpe_gid))
83770 + msg = "being in untrusted group";
83771 +#endif
83772 + }
83773 + if (!msg && gr_acl_tpe_check())
83774 + msg = "being in untrusted role";
83775 +
83776 + // not in any affected group/role
83777 + if (!msg)
83778 + goto next_check;
83779 +
83780 + if (inode->i_uid)
83781 + msg2 = "file in non-root-owned directory";
83782 + else if (inode->i_mode & S_IWOTH)
83783 + msg2 = "file in world-writable directory";
83784 + else if (inode->i_mode & S_IWGRP)
83785 + msg2 = "file in group-writable directory";
83786 +
83787 + if (msg && msg2) {
83788 + char fullmsg[70] = {0};
83789 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
83790 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
83791 + return 0;
83792 + }
83793 + msg = NULL;
83794 +next_check:
83795 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
83796 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
83797 + return 1;
83798 +
83799 + if (inode->i_uid && (inode->i_uid != cred->uid))
83800 + msg = "directory not owned by user";
83801 + else if (inode->i_mode & S_IWOTH)
83802 + msg = "file in world-writable directory";
83803 + else if (inode->i_mode & S_IWGRP)
83804 + msg = "file in group-writable directory";
83805 +
83806 + if (msg) {
83807 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
83808 + return 0;
83809 + }
83810 +#endif
83811 +#endif
83812 + return 1;
83813 +}
83814 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
83815 new file mode 100644
83816 index 0000000..9f7b1ac
83817 --- /dev/null
83818 +++ b/grsecurity/grsum.c
83819 @@ -0,0 +1,61 @@
83820 +#include <linux/err.h>
83821 +#include <linux/kernel.h>
83822 +#include <linux/sched.h>
83823 +#include <linux/mm.h>
83824 +#include <linux/scatterlist.h>
83825 +#include <linux/crypto.h>
83826 +#include <linux/gracl.h>
83827 +
83828 +
83829 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
83830 +#error "crypto and sha256 must be built into the kernel"
83831 +#endif
83832 +
83833 +int
83834 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
83835 +{
83836 + char *p;
83837 + struct crypto_hash *tfm;
83838 + struct hash_desc desc;
83839 + struct scatterlist sg;
83840 + unsigned char temp_sum[GR_SHA_LEN];
83841 + volatile int retval = 0;
83842 + volatile int dummy = 0;
83843 + unsigned int i;
83844 +
83845 + sg_init_table(&sg, 1);
83846 +
83847 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
83848 + if (IS_ERR(tfm)) {
83849 + /* should never happen, since sha256 should be built in */
83850 + return 1;
83851 + }
83852 +
83853 + desc.tfm = tfm;
83854 + desc.flags = 0;
83855 +
83856 + crypto_hash_init(&desc);
83857 +
83858 + p = salt;
83859 + sg_set_buf(&sg, p, GR_SALT_LEN);
83860 + crypto_hash_update(&desc, &sg, sg.length);
83861 +
83862 + p = entry->pw;
83863 + sg_set_buf(&sg, p, strlen(p));
83864 +
83865 + crypto_hash_update(&desc, &sg, sg.length);
83866 +
83867 + crypto_hash_final(&desc, temp_sum);
83868 +
83869 + memset(entry->pw, 0, GR_PW_LEN);
83870 +
83871 + for (i = 0; i < GR_SHA_LEN; i++)
83872 + if (sum[i] != temp_sum[i])
83873 + retval = 1;
83874 + else
83875 + dummy = 1; // waste a cycle
83876 +
83877 + crypto_free_hash(tfm);
83878 +
83879 + return retval;
83880 +}
83881 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
83882 index 3cd9ccd..fe16d47 100644
83883 --- a/include/acpi/acpi_bus.h
83884 +++ b/include/acpi/acpi_bus.h
83885 @@ -107,7 +107,7 @@ struct acpi_device_ops {
83886 acpi_op_bind bind;
83887 acpi_op_unbind unbind;
83888 acpi_op_notify notify;
83889 -};
83890 +} __no_const;
83891
83892 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
83893
83894 diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
83895 index f4906f6..71feb73 100644
83896 --- a/include/acpi/acpi_drivers.h
83897 +++ b/include/acpi/acpi_drivers.h
83898 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
83899 Dock Station
83900 -------------------------------------------------------------------------- */
83901 struct acpi_dock_ops {
83902 - acpi_notify_handler handler;
83903 - acpi_notify_handler uevent;
83904 + const acpi_notify_handler handler;
83905 + const acpi_notify_handler uevent;
83906 };
83907
83908 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
83909 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
83910 extern int register_dock_notifier(struct notifier_block *nb);
83911 extern void unregister_dock_notifier(struct notifier_block *nb);
83912 extern int register_hotplug_dock_device(acpi_handle handle,
83913 - struct acpi_dock_ops *ops,
83914 + const struct acpi_dock_ops *ops,
83915 void *context);
83916 extern void unregister_hotplug_dock_device(acpi_handle handle);
83917 #else
83918 @@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
83919 {
83920 }
83921 static inline int register_hotplug_dock_device(acpi_handle handle,
83922 - struct acpi_dock_ops *ops,
83923 + const struct acpi_dock_ops *ops,
83924 void *context)
83925 {
83926 return -ENODEV;
83927 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
83928 index b7babf0..a9ac9fc 100644
83929 --- a/include/asm-generic/atomic-long.h
83930 +++ b/include/asm-generic/atomic-long.h
83931 @@ -22,6 +22,12 @@
83932
83933 typedef atomic64_t atomic_long_t;
83934
83935 +#ifdef CONFIG_PAX_REFCOUNT
83936 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
83937 +#else
83938 +typedef atomic64_t atomic_long_unchecked_t;
83939 +#endif
83940 +
83941 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
83942
83943 static inline long atomic_long_read(atomic_long_t *l)
83944 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
83945 return (long)atomic64_read(v);
83946 }
83947
83948 +#ifdef CONFIG_PAX_REFCOUNT
83949 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
83950 +{
83951 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83952 +
83953 + return (long)atomic64_read_unchecked(v);
83954 +}
83955 +#endif
83956 +
83957 static inline void atomic_long_set(atomic_long_t *l, long i)
83958 {
83959 atomic64_t *v = (atomic64_t *)l;
83960 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
83961 atomic64_set(v, i);
83962 }
83963
83964 +#ifdef CONFIG_PAX_REFCOUNT
83965 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
83966 +{
83967 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83968 +
83969 + atomic64_set_unchecked(v, i);
83970 +}
83971 +#endif
83972 +
83973 static inline void atomic_long_inc(atomic_long_t *l)
83974 {
83975 atomic64_t *v = (atomic64_t *)l;
83976 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
83977 atomic64_inc(v);
83978 }
83979
83980 +#ifdef CONFIG_PAX_REFCOUNT
83981 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
83982 +{
83983 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83984 +
83985 + atomic64_inc_unchecked(v);
83986 +}
83987 +#endif
83988 +
83989 static inline void atomic_long_dec(atomic_long_t *l)
83990 {
83991 atomic64_t *v = (atomic64_t *)l;
83992 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
83993 atomic64_dec(v);
83994 }
83995
83996 +#ifdef CONFIG_PAX_REFCOUNT
83997 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
83998 +{
83999 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
84000 +
84001 + atomic64_dec_unchecked(v);
84002 +}
84003 +#endif
84004 +
84005 static inline void atomic_long_add(long i, atomic_long_t *l)
84006 {
84007 atomic64_t *v = (atomic64_t *)l;
84008 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
84009 atomic64_add(i, v);
84010 }
84011
84012 +#ifdef CONFIG_PAX_REFCOUNT
84013 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
84014 +{
84015 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
84016 +
84017 + atomic64_add_unchecked(i, v);
84018 +}
84019 +#endif
84020 +
84021 static inline void atomic_long_sub(long i, atomic_long_t *l)
84022 {
84023 atomic64_t *v = (atomic64_t *)l;
84024 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
84025 return (long)atomic64_inc_return(v);
84026 }
84027
84028 +#ifdef CONFIG_PAX_REFCOUNT
84029 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
84030 +{
84031 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
84032 +
84033 + return (long)atomic64_inc_return_unchecked(v);
84034 +}
84035 +#endif
84036 +
84037 static inline long atomic_long_dec_return(atomic_long_t *l)
84038 {
84039 atomic64_t *v = (atomic64_t *)l;
84040 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
84041
84042 typedef atomic_t atomic_long_t;
84043
84044 +#ifdef CONFIG_PAX_REFCOUNT
84045 +typedef atomic_unchecked_t atomic_long_unchecked_t;
84046 +#else
84047 +typedef atomic_t atomic_long_unchecked_t;
84048 +#endif
84049 +
84050 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
84051 static inline long atomic_long_read(atomic_long_t *l)
84052 {
84053 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
84054 return (long)atomic_read(v);
84055 }
84056
84057 +#ifdef CONFIG_PAX_REFCOUNT
84058 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
84059 +{
84060 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84061 +
84062 + return (long)atomic_read_unchecked(v);
84063 +}
84064 +#endif
84065 +
84066 static inline void atomic_long_set(atomic_long_t *l, long i)
84067 {
84068 atomic_t *v = (atomic_t *)l;
84069 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
84070 atomic_set(v, i);
84071 }
84072
84073 +#ifdef CONFIG_PAX_REFCOUNT
84074 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
84075 +{
84076 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84077 +
84078 + atomic_set_unchecked(v, i);
84079 +}
84080 +#endif
84081 +
84082 static inline void atomic_long_inc(atomic_long_t *l)
84083 {
84084 atomic_t *v = (atomic_t *)l;
84085 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
84086 atomic_inc(v);
84087 }
84088
84089 +#ifdef CONFIG_PAX_REFCOUNT
84090 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
84091 +{
84092 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84093 +
84094 + atomic_inc_unchecked(v);
84095 +}
84096 +#endif
84097 +
84098 static inline void atomic_long_dec(atomic_long_t *l)
84099 {
84100 atomic_t *v = (atomic_t *)l;
84101 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
84102 atomic_dec(v);
84103 }
84104
84105 +#ifdef CONFIG_PAX_REFCOUNT
84106 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
84107 +{
84108 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84109 +
84110 + atomic_dec_unchecked(v);
84111 +}
84112 +#endif
84113 +
84114 static inline void atomic_long_add(long i, atomic_long_t *l)
84115 {
84116 atomic_t *v = (atomic_t *)l;
84117 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
84118 atomic_add(i, v);
84119 }
84120
84121 +#ifdef CONFIG_PAX_REFCOUNT
84122 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
84123 +{
84124 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84125 +
84126 + atomic_add_unchecked(i, v);
84127 +}
84128 +#endif
84129 +
84130 static inline void atomic_long_sub(long i, atomic_long_t *l)
84131 {
84132 atomic_t *v = (atomic_t *)l;
84133 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
84134 return (long)atomic_inc_return(v);
84135 }
84136
84137 +#ifdef CONFIG_PAX_REFCOUNT
84138 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
84139 +{
84140 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84141 +
84142 + return (long)atomic_inc_return_unchecked(v);
84143 +}
84144 +#endif
84145 +
84146 static inline long atomic_long_dec_return(atomic_long_t *l)
84147 {
84148 atomic_t *v = (atomic_t *)l;
84149 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
84150
84151 #endif /* BITS_PER_LONG == 64 */
84152
84153 +#ifdef CONFIG_PAX_REFCOUNT
84154 +static inline void pax_refcount_needs_these_functions(void)
84155 +{
84156 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
84157 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
84158 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
84159 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
84160 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
84161 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
84162 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
84163 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
84164 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
84165 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
84166 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
84167 +
84168 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
84169 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
84170 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
84171 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
84172 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
84173 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
84174 +}
84175 +#else
84176 +#define atomic_read_unchecked(v) atomic_read(v)
84177 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
84178 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
84179 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
84180 +#define atomic_inc_unchecked(v) atomic_inc(v)
84181 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
84182 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
84183 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
84184 +#define atomic_dec_unchecked(v) atomic_dec(v)
84185 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
84186 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
84187 +
84188 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
84189 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
84190 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
84191 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
84192 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
84193 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
84194 +#endif
84195 +
84196 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
84197 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
84198 index b18ce4f..2ee2843 100644
84199 --- a/include/asm-generic/atomic64.h
84200 +++ b/include/asm-generic/atomic64.h
84201 @@ -16,6 +16,8 @@ typedef struct {
84202 long long counter;
84203 } atomic64_t;
84204
84205 +typedef atomic64_t atomic64_unchecked_t;
84206 +
84207 #define ATOMIC64_INIT(i) { (i) }
84208
84209 extern long long atomic64_read(const atomic64_t *v);
84210 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
84211 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
84212 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
84213
84214 +#define atomic64_read_unchecked(v) atomic64_read(v)
84215 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
84216 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
84217 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
84218 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
84219 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
84220 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
84221 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
84222 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
84223 +
84224 #endif /* _ASM_GENERIC_ATOMIC64_H */
84225 diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
84226 index d48ddf0..656a0ac 100644
84227 --- a/include/asm-generic/bug.h
84228 +++ b/include/asm-generic/bug.h
84229 @@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
84230
84231 #else /* !CONFIG_BUG */
84232 #ifndef HAVE_ARCH_BUG
84233 -#define BUG() do {} while(0)
84234 +#define BUG() do { for (;;) ; } while(0)
84235 #endif
84236
84237 #ifndef HAVE_ARCH_BUG_ON
84238 -#define BUG_ON(condition) do { if (condition) ; } while(0)
84239 +#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
84240 #endif
84241
84242 #ifndef HAVE_ARCH_WARN_ON
84243 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
84244 index 1bfcfe5..e04c5c9 100644
84245 --- a/include/asm-generic/cache.h
84246 +++ b/include/asm-generic/cache.h
84247 @@ -6,7 +6,7 @@
84248 * cache lines need to provide their own cache.h.
84249 */
84250
84251 -#define L1_CACHE_SHIFT 5
84252 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
84253 +#define L1_CACHE_SHIFT 5UL
84254 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
84255
84256 #endif /* __ASM_GENERIC_CACHE_H */
84257 diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
84258 index 6920695..41038bc 100644
84259 --- a/include/asm-generic/dma-mapping-common.h
84260 +++ b/include/asm-generic/dma-mapping-common.h
84261 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
84262 enum dma_data_direction dir,
84263 struct dma_attrs *attrs)
84264 {
84265 - struct dma_map_ops *ops = get_dma_ops(dev);
84266 + const struct dma_map_ops *ops = get_dma_ops(dev);
84267 dma_addr_t addr;
84268
84269 kmemcheck_mark_initialized(ptr, size);
84270 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
84271 enum dma_data_direction dir,
84272 struct dma_attrs *attrs)
84273 {
84274 - struct dma_map_ops *ops = get_dma_ops(dev);
84275 + const struct dma_map_ops *ops = get_dma_ops(dev);
84276
84277 BUG_ON(!valid_dma_direction(dir));
84278 if (ops->unmap_page)
84279 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
84280 int nents, enum dma_data_direction dir,
84281 struct dma_attrs *attrs)
84282 {
84283 - struct dma_map_ops *ops = get_dma_ops(dev);
84284 + const struct dma_map_ops *ops = get_dma_ops(dev);
84285 int i, ents;
84286 struct scatterlist *s;
84287
84288 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
84289 int nents, enum dma_data_direction dir,
84290 struct dma_attrs *attrs)
84291 {
84292 - struct dma_map_ops *ops = get_dma_ops(dev);
84293 + const struct dma_map_ops *ops = get_dma_ops(dev);
84294
84295 BUG_ON(!valid_dma_direction(dir));
84296 debug_dma_unmap_sg(dev, sg, nents, dir);
84297 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
84298 size_t offset, size_t size,
84299 enum dma_data_direction dir)
84300 {
84301 - struct dma_map_ops *ops = get_dma_ops(dev);
84302 + const struct dma_map_ops *ops = get_dma_ops(dev);
84303 dma_addr_t addr;
84304
84305 kmemcheck_mark_initialized(page_address(page) + offset, size);
84306 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
84307 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
84308 size_t size, enum dma_data_direction dir)
84309 {
84310 - struct dma_map_ops *ops = get_dma_ops(dev);
84311 + const struct dma_map_ops *ops = get_dma_ops(dev);
84312
84313 BUG_ON(!valid_dma_direction(dir));
84314 if (ops->unmap_page)
84315 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
84316 size_t size,
84317 enum dma_data_direction dir)
84318 {
84319 - struct dma_map_ops *ops = get_dma_ops(dev);
84320 + const struct dma_map_ops *ops = get_dma_ops(dev);
84321
84322 BUG_ON(!valid_dma_direction(dir));
84323 if (ops->sync_single_for_cpu)
84324 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
84325 dma_addr_t addr, size_t size,
84326 enum dma_data_direction dir)
84327 {
84328 - struct dma_map_ops *ops = get_dma_ops(dev);
84329 + const struct dma_map_ops *ops = get_dma_ops(dev);
84330
84331 BUG_ON(!valid_dma_direction(dir));
84332 if (ops->sync_single_for_device)
84333 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
84334 size_t size,
84335 enum dma_data_direction dir)
84336 {
84337 - struct dma_map_ops *ops = get_dma_ops(dev);
84338 + const struct dma_map_ops *ops = get_dma_ops(dev);
84339
84340 BUG_ON(!valid_dma_direction(dir));
84341 if (ops->sync_single_range_for_cpu) {
84342 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
84343 size_t size,
84344 enum dma_data_direction dir)
84345 {
84346 - struct dma_map_ops *ops = get_dma_ops(dev);
84347 + const struct dma_map_ops *ops = get_dma_ops(dev);
84348
84349 BUG_ON(!valid_dma_direction(dir));
84350 if (ops->sync_single_range_for_device) {
84351 @@ -155,7 +155,7 @@ static inline void
84352 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
84353 int nelems, enum dma_data_direction dir)
84354 {
84355 - struct dma_map_ops *ops = get_dma_ops(dev);
84356 + const struct dma_map_ops *ops = get_dma_ops(dev);
84357
84358 BUG_ON(!valid_dma_direction(dir));
84359 if (ops->sync_sg_for_cpu)
84360 @@ -167,7 +167,7 @@ static inline void
84361 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
84362 int nelems, enum dma_data_direction dir)
84363 {
84364 - struct dma_map_ops *ops = get_dma_ops(dev);
84365 + const struct dma_map_ops *ops = get_dma_ops(dev);
84366
84367 BUG_ON(!valid_dma_direction(dir));
84368 if (ops->sync_sg_for_device)
84369 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
84370 index 0d68a1e..b74a761 100644
84371 --- a/include/asm-generic/emergency-restart.h
84372 +++ b/include/asm-generic/emergency-restart.h
84373 @@ -1,7 +1,7 @@
84374 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
84375 #define _ASM_GENERIC_EMERGENCY_RESTART_H
84376
84377 -static inline void machine_emergency_restart(void)
84378 +static inline __noreturn void machine_emergency_restart(void)
84379 {
84380 machine_restart(NULL);
84381 }
84382 diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
84383 index 3c2344f..4590a7d 100644
84384 --- a/include/asm-generic/futex.h
84385 +++ b/include/asm-generic/futex.h
84386 @@ -6,7 +6,7 @@
84387 #include <asm/errno.h>
84388
84389 static inline int
84390 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
84391 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
84392 {
84393 int op = (encoded_op >> 28) & 7;
84394 int cmp = (encoded_op >> 24) & 15;
84395 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
84396 }
84397
84398 static inline int
84399 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
84400 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
84401 {
84402 return -ENOSYS;
84403 }
84404 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
84405 index e5f234a..cdb16b3 100644
84406 --- a/include/asm-generic/kmap_types.h
84407 +++ b/include/asm-generic/kmap_types.h
84408 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
84409 KMAP_D(16) KM_IRQ_PTE,
84410 KMAP_D(17) KM_NMI,
84411 KMAP_D(18) KM_NMI_PTE,
84412 -KMAP_D(19) KM_TYPE_NR
84413 +KMAP_D(19) KM_CLEARPAGE,
84414 +KMAP_D(20) KM_TYPE_NR
84415 };
84416
84417 #undef KMAP_D
84418 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
84419 index fc21844..2ee9629 100644
84420 --- a/include/asm-generic/local.h
84421 +++ b/include/asm-generic/local.h
84422 @@ -39,6 +39,7 @@ typedef struct
84423 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
84424 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
84425 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
84426 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
84427
84428 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
84429 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
84430 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
84431 index 725612b..9cc513a 100644
84432 --- a/include/asm-generic/pgtable-nopmd.h
84433 +++ b/include/asm-generic/pgtable-nopmd.h
84434 @@ -1,14 +1,19 @@
84435 #ifndef _PGTABLE_NOPMD_H
84436 #define _PGTABLE_NOPMD_H
84437
84438 -#ifndef __ASSEMBLY__
84439 -
84440 #include <asm-generic/pgtable-nopud.h>
84441
84442 -struct mm_struct;
84443 -
84444 #define __PAGETABLE_PMD_FOLDED
84445
84446 +#define PMD_SHIFT PUD_SHIFT
84447 +#define PTRS_PER_PMD 1
84448 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
84449 +#define PMD_MASK (~(PMD_SIZE-1))
84450 +
84451 +#ifndef __ASSEMBLY__
84452 +
84453 +struct mm_struct;
84454 +
84455 /*
84456 * Having the pmd type consist of a pud gets the size right, and allows
84457 * us to conceptually access the pud entry that this pmd is folded into
84458 @@ -16,11 +21,6 @@ struct mm_struct;
84459 */
84460 typedef struct { pud_t pud; } pmd_t;
84461
84462 -#define PMD_SHIFT PUD_SHIFT
84463 -#define PTRS_PER_PMD 1
84464 -#define PMD_SIZE (1UL << PMD_SHIFT)
84465 -#define PMD_MASK (~(PMD_SIZE-1))
84466 -
84467 /*
84468 * The "pud_xxx()" functions here are trivial for a folded two-level
84469 * setup: the pmd is never bad, and a pmd always exists (as it's folded
84470 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
84471 index 810431d..ccc3638 100644
84472 --- a/include/asm-generic/pgtable-nopud.h
84473 +++ b/include/asm-generic/pgtable-nopud.h
84474 @@ -1,10 +1,15 @@
84475 #ifndef _PGTABLE_NOPUD_H
84476 #define _PGTABLE_NOPUD_H
84477
84478 -#ifndef __ASSEMBLY__
84479 -
84480 #define __PAGETABLE_PUD_FOLDED
84481
84482 +#define PUD_SHIFT PGDIR_SHIFT
84483 +#define PTRS_PER_PUD 1
84484 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
84485 +#define PUD_MASK (~(PUD_SIZE-1))
84486 +
84487 +#ifndef __ASSEMBLY__
84488 +
84489 /*
84490 * Having the pud type consist of a pgd gets the size right, and allows
84491 * us to conceptually access the pgd entry that this pud is folded into
84492 @@ -12,11 +17,6 @@
84493 */
84494 typedef struct { pgd_t pgd; } pud_t;
84495
84496 -#define PUD_SHIFT PGDIR_SHIFT
84497 -#define PTRS_PER_PUD 1
84498 -#define PUD_SIZE (1UL << PUD_SHIFT)
84499 -#define PUD_MASK (~(PUD_SIZE-1))
84500 -
84501 /*
84502 * The "pgd_xxx()" functions here are trivial for a folded two-level
84503 * setup: the pud is never bad, and a pud always exists (as it's folded
84504 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
84505 index e2bd73e..fea8ed3 100644
84506 --- a/include/asm-generic/pgtable.h
84507 +++ b/include/asm-generic/pgtable.h
84508 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
84509 unsigned long size);
84510 #endif
84511
84512 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
84513 +static inline unsigned long pax_open_kernel(void) { return 0; }
84514 +#endif
84515 +
84516 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
84517 +static inline unsigned long pax_close_kernel(void) { return 0; }
84518 +#endif
84519 +
84520 #endif /* !__ASSEMBLY__ */
84521
84522 #endif /* _ASM_GENERIC_PGTABLE_H */
84523 diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
84524 index b218b85..f0ac13a 100644
84525 --- a/include/asm-generic/uaccess.h
84526 +++ b/include/asm-generic/uaccess.h
84527 @@ -76,6 +76,8 @@ extern unsigned long search_exception_table(unsigned long);
84528 */
84529 #ifndef __copy_from_user
84530 static inline __must_check long __copy_from_user(void *to,
84531 + const void __user * from, unsigned long n) __size_overflow(3);
84532 +static inline __must_check long __copy_from_user(void *to,
84533 const void __user * from, unsigned long n)
84534 {
84535 if (__builtin_constant_p(n)) {
84536 @@ -106,6 +108,8 @@ static inline __must_check long __copy_from_user(void *to,
84537
84538 #ifndef __copy_to_user
84539 static inline __must_check long __copy_to_user(void __user *to,
84540 + const void *from, unsigned long n) __size_overflow(3);
84541 +static inline __must_check long __copy_to_user(void __user *to,
84542 const void *from, unsigned long n)
84543 {
84544 if (__builtin_constant_p(n)) {
84545 @@ -224,6 +228,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
84546 -EFAULT; \
84547 })
84548
84549 +static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) __size_overflow(1);
84550 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
84551 {
84552 size = __copy_from_user(x, ptr, size);
84553 @@ -240,6 +245,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
84554 #define __copy_to_user_inatomic __copy_to_user
84555 #endif
84556
84557 +static inline long copy_from_user(void *to, const void __user * from, unsigned long n) __size_overflow(3);
84558 static inline long copy_from_user(void *to,
84559 const void __user * from, unsigned long n)
84560 {
84561 @@ -250,6 +256,7 @@ static inline long copy_from_user(void *to,
84562 return n;
84563 }
84564
84565 +static inline long copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
84566 static inline long copy_to_user(void __user *to,
84567 const void *from, unsigned long n)
84568 {
84569 @@ -265,6 +272,8 @@ static inline long copy_to_user(void __user *to,
84570 */
84571 #ifndef __strncpy_from_user
84572 static inline long
84573 +__strncpy_from_user(char *dst, const char __user *src, unsigned long count) __size_overflow(3);
84574 +static inline long
84575 __strncpy_from_user(char *dst, const char __user *src, long count)
84576 {
84577 char *tmp;
84578 @@ -276,6 +285,8 @@ __strncpy_from_user(char *dst, const char __user *src, long count)
84579 #endif
84580
84581 static inline long
84582 +strncpy_from_user(char *dst, const char __user *src, unsigned long count) __size_overflow(3);
84583 +static inline long
84584 strncpy_from_user(char *dst, const char __user *src, long count)
84585 {
84586 if (!access_ok(VERIFY_READ, src, 1))
84587 @@ -289,6 +300,7 @@ strncpy_from_user(char *dst, const char __user *src, long count)
84588 * Return 0 on exception, a value greater than N if too long
84589 */
84590 #ifndef strnlen_user
84591 +static inline long strnlen_user(const char __user *src, unsigned long n) __size_overflow(2);
84592 static inline long strnlen_user(const char __user *src, long n)
84593 {
84594 if (!access_ok(VERIFY_READ, src, 1))
84595 @@ -307,6 +319,8 @@ static inline long strlen_user(const char __user *src)
84596 */
84597 #ifndef __clear_user
84598 static inline __must_check unsigned long
84599 +__clear_user(void __user *to, unsigned long n) __size_overflow(2);
84600 +static inline __must_check unsigned long
84601 __clear_user(void __user *to, unsigned long n)
84602 {
84603 memset((void __force *)to, 0, n);
84604 @@ -315,6 +329,8 @@ __clear_user(void __user *to, unsigned long n)
84605 #endif
84606
84607 static inline __must_check unsigned long
84608 +clear_user(void __user *to, unsigned long n) __size_overflow(2);
84609 +static inline __must_check unsigned long
84610 clear_user(void __user *to, unsigned long n)
84611 {
84612 might_sleep();
84613 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
84614 index b6e818f..21aa58a 100644
84615 --- a/include/asm-generic/vmlinux.lds.h
84616 +++ b/include/asm-generic/vmlinux.lds.h
84617 @@ -199,6 +199,7 @@
84618 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
84619 VMLINUX_SYMBOL(__start_rodata) = .; \
84620 *(.rodata) *(.rodata.*) \
84621 + *(.data.read_only) \
84622 *(__vermagic) /* Kernel version magic */ \
84623 *(__markers_strings) /* Markers: strings */ \
84624 *(__tracepoints_strings)/* Tracepoints: strings */ \
84625 @@ -656,22 +657,24 @@
84626 * section in the linker script will go there too. @phdr should have
84627 * a leading colon.
84628 *
84629 - * Note that this macros defines __per_cpu_load as an absolute symbol.
84630 + * Note that this macros defines per_cpu_load as an absolute symbol.
84631 * If there is no need to put the percpu section at a predetermined
84632 * address, use PERCPU().
84633 */
84634 #define PERCPU_VADDR(vaddr, phdr) \
84635 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
84636 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
84637 + per_cpu_load = .; \
84638 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
84639 - LOAD_OFFSET) { \
84640 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
84641 VMLINUX_SYMBOL(__per_cpu_start) = .; \
84642 *(.data.percpu.first) \
84643 - *(.data.percpu.page_aligned) \
84644 *(.data.percpu) \
84645 + . = ALIGN(PAGE_SIZE); \
84646 + *(.data.percpu.page_aligned) \
84647 *(.data.percpu.shared_aligned) \
84648 VMLINUX_SYMBOL(__per_cpu_end) = .; \
84649 } phdr \
84650 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
84651 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
84652
84653 /**
84654 * PERCPU - define output section for percpu area, simple version
84655 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
84656 index ebab6a6..351dba1 100644
84657 --- a/include/drm/drmP.h
84658 +++ b/include/drm/drmP.h
84659 @@ -71,6 +71,7 @@
84660 #include <linux/workqueue.h>
84661 #include <linux/poll.h>
84662 #include <asm/pgalloc.h>
84663 +#include <asm/local.h>
84664 #include "drm.h"
84665
84666 #include <linux/idr.h>
84667 @@ -814,7 +815,7 @@ struct drm_driver {
84668 void (*vgaarb_irq)(struct drm_device *dev, bool state);
84669
84670 /* Driver private ops for this object */
84671 - struct vm_operations_struct *gem_vm_ops;
84672 + const struct vm_operations_struct *gem_vm_ops;
84673
84674 int major;
84675 int minor;
84676 @@ -917,7 +918,7 @@ struct drm_device {
84677
84678 /** \name Usage Counters */
84679 /*@{ */
84680 - int open_count; /**< Outstanding files open */
84681 + local_t open_count; /**< Outstanding files open */
84682 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
84683 atomic_t vma_count; /**< Outstanding vma areas open */
84684 int buf_use; /**< Buffers in use -- cannot alloc */
84685 @@ -928,7 +929,7 @@ struct drm_device {
84686 /*@{ */
84687 unsigned long counters;
84688 enum drm_stat_type types[15];
84689 - atomic_t counts[15];
84690 + atomic_unchecked_t counts[15];
84691 /*@} */
84692
84693 struct list_head filelist;
84694 @@ -1016,7 +1017,7 @@ struct drm_device {
84695 struct pci_controller *hose;
84696 #endif
84697 struct drm_sg_mem *sg; /**< Scatter gather memory */
84698 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
84699 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
84700 void *dev_private; /**< device private data */
84701 void *mm_private;
84702 struct address_space *dev_mapping;
84703 @@ -1042,11 +1043,11 @@ struct drm_device {
84704 spinlock_t object_name_lock;
84705 struct idr object_name_idr;
84706 atomic_t object_count;
84707 - atomic_t object_memory;
84708 + atomic_unchecked_t object_memory;
84709 atomic_t pin_count;
84710 - atomic_t pin_memory;
84711 + atomic_unchecked_t pin_memory;
84712 atomic_t gtt_count;
84713 - atomic_t gtt_memory;
84714 + atomic_unchecked_t gtt_memory;
84715 uint32_t gtt_total;
84716 uint32_t invalidate_domains; /* domains pending invalidation */
84717 uint32_t flush_domains; /* domains pending flush */
84718 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
84719 index b29e201..3413cc9 100644
84720 --- a/include/drm/drm_crtc_helper.h
84721 +++ b/include/drm/drm_crtc_helper.h
84722 @@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
84723
84724 /* reload the current crtc LUT */
84725 void (*load_lut)(struct drm_crtc *crtc);
84726 -};
84727 +} __no_const;
84728
84729 struct drm_encoder_helper_funcs {
84730 void (*dpms)(struct drm_encoder *encoder, int mode);
84731 @@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
84732 struct drm_connector *connector);
84733 /* disable encoder when not in use - more explicit than dpms off */
84734 void (*disable)(struct drm_encoder *encoder);
84735 -};
84736 +} __no_const;
84737
84738 struct drm_connector_helper_funcs {
84739 int (*get_modes)(struct drm_connector *connector);
84740 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
84741 index b199170..6f9e64c 100644
84742 --- a/include/drm/ttm/ttm_memory.h
84743 +++ b/include/drm/ttm/ttm_memory.h
84744 @@ -47,7 +47,7 @@
84745
84746 struct ttm_mem_shrink {
84747 int (*do_shrink) (struct ttm_mem_shrink *);
84748 -};
84749 +} __no_const;
84750
84751 /**
84752 * struct ttm_mem_global - Global memory accounting structure.
84753 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
84754 index e86dfca..40cc55f 100644
84755 --- a/include/linux/a.out.h
84756 +++ b/include/linux/a.out.h
84757 @@ -39,6 +39,14 @@ enum machine_type {
84758 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
84759 };
84760
84761 +/* Constants for the N_FLAGS field */
84762 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
84763 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
84764 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
84765 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
84766 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
84767 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
84768 +
84769 #if !defined (N_MAGIC)
84770 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
84771 #endif
84772 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
84773 index 817b237..62c10bc 100644
84774 --- a/include/linux/atmdev.h
84775 +++ b/include/linux/atmdev.h
84776 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
84777 #endif
84778
84779 struct k_atm_aal_stats {
84780 -#define __HANDLE_ITEM(i) atomic_t i
84781 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
84782 __AAL_STAT_ITEMS
84783 #undef __HANDLE_ITEM
84784 };
84785 diff --git a/include/linux/backlight.h b/include/linux/backlight.h
84786 index 0f5f578..8c4f884 100644
84787 --- a/include/linux/backlight.h
84788 +++ b/include/linux/backlight.h
84789 @@ -36,18 +36,18 @@ struct backlight_device;
84790 struct fb_info;
84791
84792 struct backlight_ops {
84793 - unsigned int options;
84794 + const unsigned int options;
84795
84796 #define BL_CORE_SUSPENDRESUME (1 << 0)
84797
84798 /* Notify the backlight driver some property has changed */
84799 - int (*update_status)(struct backlight_device *);
84800 + int (* const update_status)(struct backlight_device *);
84801 /* Return the current backlight brightness (accounting for power,
84802 fb_blank etc.) */
84803 - int (*get_brightness)(struct backlight_device *);
84804 + int (* const get_brightness)(struct backlight_device *);
84805 /* Check if given framebuffer device is the one bound to this backlight;
84806 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
84807 - int (*check_fb)(struct fb_info *);
84808 + int (* const check_fb)(struct fb_info *);
84809 };
84810
84811 /* This structure defines all the properties of a backlight */
84812 @@ -86,7 +86,7 @@ struct backlight_device {
84813 registered this device has been unloaded, and if class_get_devdata()
84814 points to something in the body of that driver, it is also invalid. */
84815 struct mutex ops_lock;
84816 - struct backlight_ops *ops;
84817 + const struct backlight_ops *ops;
84818
84819 /* The framebuffer notifier block */
84820 struct notifier_block fb_notif;
84821 @@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
84822 }
84823
84824 extern struct backlight_device *backlight_device_register(const char *name,
84825 - struct device *dev, void *devdata, struct backlight_ops *ops);
84826 + struct device *dev, void *devdata, const struct backlight_ops *ops);
84827 extern void backlight_device_unregister(struct backlight_device *bd);
84828 extern void backlight_force_update(struct backlight_device *bd,
84829 enum backlight_update_reason reason);
84830 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
84831 index a3d802e..93a2ef4 100644
84832 --- a/include/linux/binfmts.h
84833 +++ b/include/linux/binfmts.h
84834 @@ -18,7 +18,7 @@ struct pt_regs;
84835 #define BINPRM_BUF_SIZE 128
84836
84837 #ifdef __KERNEL__
84838 -#include <linux/list.h>
84839 +#include <linux/sched.h>
84840
84841 #define CORENAME_MAX_SIZE 128
84842
84843 @@ -58,6 +58,7 @@ struct linux_binprm{
84844 unsigned interp_flags;
84845 unsigned interp_data;
84846 unsigned long loader, exec;
84847 + char tcomm[TASK_COMM_LEN];
84848 };
84849
84850 extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
84851 @@ -83,6 +84,7 @@ struct linux_binfmt {
84852 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
84853 int (*load_shlib)(struct file *);
84854 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
84855 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
84856 unsigned long min_coredump; /* minimal dump size */
84857 int hasvdso;
84858 };
84859 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
84860 index 5eb6cb0..a2906d2 100644
84861 --- a/include/linux/blkdev.h
84862 +++ b/include/linux/blkdev.h
84863 @@ -1281,7 +1281,7 @@ struct block_device_operations {
84864 int (*revalidate_disk) (struct gendisk *);
84865 int (*getgeo)(struct block_device *, struct hd_geometry *);
84866 struct module *owner;
84867 -};
84868 +} __do_const;
84869
84870 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
84871 unsigned long);
84872 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
84873 index 3b73b99..629d21b 100644
84874 --- a/include/linux/blktrace_api.h
84875 +++ b/include/linux/blktrace_api.h
84876 @@ -160,7 +160,7 @@ struct blk_trace {
84877 struct dentry *dir;
84878 struct dentry *dropped_file;
84879 struct dentry *msg_file;
84880 - atomic_t dropped;
84881 + atomic_unchecked_t dropped;
84882 };
84883
84884 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
84885 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
84886 index 83195fb..0b0f77d 100644
84887 --- a/include/linux/byteorder/little_endian.h
84888 +++ b/include/linux/byteorder/little_endian.h
84889 @@ -42,51 +42,51 @@
84890
84891 static inline __le64 __cpu_to_le64p(const __u64 *p)
84892 {
84893 - return (__force __le64)*p;
84894 + return (__force const __le64)*p;
84895 }
84896 static inline __u64 __le64_to_cpup(const __le64 *p)
84897 {
84898 - return (__force __u64)*p;
84899 + return (__force const __u64)*p;
84900 }
84901 static inline __le32 __cpu_to_le32p(const __u32 *p)
84902 {
84903 - return (__force __le32)*p;
84904 + return (__force const __le32)*p;
84905 }
84906 static inline __u32 __le32_to_cpup(const __le32 *p)
84907 {
84908 - return (__force __u32)*p;
84909 + return (__force const __u32)*p;
84910 }
84911 static inline __le16 __cpu_to_le16p(const __u16 *p)
84912 {
84913 - return (__force __le16)*p;
84914 + return (__force const __le16)*p;
84915 }
84916 static inline __u16 __le16_to_cpup(const __le16 *p)
84917 {
84918 - return (__force __u16)*p;
84919 + return (__force const __u16)*p;
84920 }
84921 static inline __be64 __cpu_to_be64p(const __u64 *p)
84922 {
84923 - return (__force __be64)__swab64p(p);
84924 + return (__force const __be64)__swab64p(p);
84925 }
84926 static inline __u64 __be64_to_cpup(const __be64 *p)
84927 {
84928 - return __swab64p((__u64 *)p);
84929 + return __swab64p((const __u64 *)p);
84930 }
84931 static inline __be32 __cpu_to_be32p(const __u32 *p)
84932 {
84933 - return (__force __be32)__swab32p(p);
84934 + return (__force const __be32)__swab32p(p);
84935 }
84936 static inline __u32 __be32_to_cpup(const __be32 *p)
84937 {
84938 - return __swab32p((__u32 *)p);
84939 + return __swab32p((const __u32 *)p);
84940 }
84941 static inline __be16 __cpu_to_be16p(const __u16 *p)
84942 {
84943 - return (__force __be16)__swab16p(p);
84944 + return (__force const __be16)__swab16p(p);
84945 }
84946 static inline __u16 __be16_to_cpup(const __be16 *p)
84947 {
84948 - return __swab16p((__u16 *)p);
84949 + return __swab16p((const __u16 *)p);
84950 }
84951 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
84952 #define __le64_to_cpus(x) do { (void)(x); } while (0)
84953 diff --git a/include/linux/cache.h b/include/linux/cache.h
84954 index 97e2488..e7576b9 100644
84955 --- a/include/linux/cache.h
84956 +++ b/include/linux/cache.h
84957 @@ -16,6 +16,10 @@
84958 #define __read_mostly
84959 #endif
84960
84961 +#ifndef __read_only
84962 +#define __read_only __read_mostly
84963 +#endif
84964 +
84965 #ifndef ____cacheline_aligned
84966 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
84967 #endif
84968 diff --git a/include/linux/capability.h b/include/linux/capability.h
84969 index c8f2a5f7..1618a5c 100644
84970 --- a/include/linux/capability.h
84971 +++ b/include/linux/capability.h
84972 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
84973 (security_real_capable_noaudit((t), (cap)) == 0)
84974
84975 extern int capable(int cap);
84976 +int capable_nolog(int cap);
84977
84978 /* audit system wants to get cap info from files as well */
84979 struct dentry;
84980 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
84981 index 450fa59..246fa19 100644
84982 --- a/include/linux/compiler-gcc4.h
84983 +++ b/include/linux/compiler-gcc4.h
84984 @@ -14,6 +14,9 @@
84985 #define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
84986 #define __always_inline inline __attribute__((always_inline))
84987
84988 +#ifdef SIZE_OVERFLOW_PLUGIN
84989 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
84990 +#endif
84991 /*
84992 * A trick to suppress uninitialized variable warning without generating any
84993 * code
84994 @@ -36,4 +39,16 @@
84995 the kernel context */
84996 #define __cold __attribute__((__cold__))
84997
84998 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
84999 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
85000 +#define __bos0(ptr) __bos((ptr), 0)
85001 +#define __bos1(ptr) __bos((ptr), 1)
85002 +
85003 +#if __GNUC_MINOR__ >= 5
85004 +#ifdef CONSTIFY_PLUGIN
85005 +#define __no_const __attribute__((no_const))
85006 +#define __do_const __attribute__((do_const))
85007 +#endif
85008 +#endif
85009 +
85010 #endif
85011 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
85012 index 04fb513..6189f3b 100644
85013 --- a/include/linux/compiler.h
85014 +++ b/include/linux/compiler.h
85015 @@ -5,11 +5,14 @@
85016
85017 #ifdef __CHECKER__
85018 # define __user __attribute__((noderef, address_space(1)))
85019 +# define __force_user __force __user
85020 # define __kernel /* default address space */
85021 +# define __force_kernel __force __kernel
85022 # define __safe __attribute__((safe))
85023 # define __force __attribute__((force))
85024 # define __nocast __attribute__((nocast))
85025 # define __iomem __attribute__((noderef, address_space(2)))
85026 +# define __force_iomem __force __iomem
85027 # define __acquires(x) __attribute__((context(x,0,1)))
85028 # define __releases(x) __attribute__((context(x,1,0)))
85029 # define __acquire(x) __context__(x,1)
85030 @@ -17,13 +20,34 @@
85031 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
85032 extern void __chk_user_ptr(const volatile void __user *);
85033 extern void __chk_io_ptr(const volatile void __iomem *);
85034 +#elif defined(CHECKER_PLUGIN)
85035 +//# define __user
85036 +//# define __force_user
85037 +//# define __kernel
85038 +//# define __force_kernel
85039 +# define __safe
85040 +# define __force
85041 +# define __nocast
85042 +# define __iomem
85043 +# define __force_iomem
85044 +# define __chk_user_ptr(x) (void)0
85045 +# define __chk_io_ptr(x) (void)0
85046 +# define __builtin_warning(x, y...) (1)
85047 +# define __acquires(x)
85048 +# define __releases(x)
85049 +# define __acquire(x) (void)0
85050 +# define __release(x) (void)0
85051 +# define __cond_lock(x,c) (c)
85052 #else
85053 # define __user
85054 +# define __force_user
85055 # define __kernel
85056 +# define __force_kernel
85057 # define __safe
85058 # define __force
85059 # define __nocast
85060 # define __iomem
85061 +# define __force_iomem
85062 # define __chk_user_ptr(x) (void)0
85063 # define __chk_io_ptr(x) (void)0
85064 # define __builtin_warning(x, y...) (1)
85065 @@ -247,6 +271,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
85066 # define __attribute_const__ /* unimplemented */
85067 #endif
85068
85069 +#ifndef __no_const
85070 +# define __no_const
85071 +#endif
85072 +
85073 +#ifndef __do_const
85074 +# define __do_const
85075 +#endif
85076 +
85077 +#ifndef __size_overflow
85078 +# define __size_overflow(...)
85079 +#endif
85080 /*
85081 * Tell gcc if a function is cold. The compiler will assume any path
85082 * directly leading to the call is unlikely.
85083 @@ -256,6 +291,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
85084 #define __cold
85085 #endif
85086
85087 +#ifndef __alloc_size
85088 +#define __alloc_size(...)
85089 +#endif
85090 +
85091 +#ifndef __bos
85092 +#define __bos(ptr, arg)
85093 +#endif
85094 +
85095 +#ifndef __bos0
85096 +#define __bos0(ptr)
85097 +#endif
85098 +
85099 +#ifndef __bos1
85100 +#define __bos1(ptr)
85101 +#endif
85102 +
85103 /* Simple shorthand for a section definition */
85104 #ifndef __section
85105 # define __section(S) __attribute__ ((__section__(#S)))
85106 @@ -278,6 +329,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
85107 * use is to mediate communication between process-level code and irq/NMI
85108 * handlers, all running on the same CPU.
85109 */
85110 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
85111 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
85112 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
85113
85114 #endif /* __LINUX_COMPILER_H */
85115 diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
85116 index 0026f26..6c237c5 100644
85117 --- a/include/linux/crash_dump.h
85118 +++ b/include/linux/crash_dump.h
85119 @@ -12,7 +12,7 @@
85120 extern unsigned long long elfcorehdr_addr;
85121
85122 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
85123 - unsigned long, int);
85124 + unsigned long, int) __size_overflow(3);
85125
85126 /* Architecture code defines this if there are other possible ELF
85127 * machine types, e.g. on bi-arch capable hardware. */
85128 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
85129 index fd92988..a3164bd 100644
85130 --- a/include/linux/crypto.h
85131 +++ b/include/linux/crypto.h
85132 @@ -394,7 +394,7 @@ struct cipher_tfm {
85133 const u8 *key, unsigned int keylen);
85134 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
85135 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
85136 -};
85137 +} __no_const;
85138
85139 struct hash_tfm {
85140 int (*init)(struct hash_desc *desc);
85141 @@ -415,13 +415,13 @@ struct compress_tfm {
85142 int (*cot_decompress)(struct crypto_tfm *tfm,
85143 const u8 *src, unsigned int slen,
85144 u8 *dst, unsigned int *dlen);
85145 -};
85146 +} __no_const;
85147
85148 struct rng_tfm {
85149 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
85150 unsigned int dlen);
85151 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
85152 -};
85153 +} __no_const;
85154
85155 #define crt_ablkcipher crt_u.ablkcipher
85156 #define crt_aead crt_u.aead
85157 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
85158 index 30b93b2..cd7a8db 100644
85159 --- a/include/linux/dcache.h
85160 +++ b/include/linux/dcache.h
85161 @@ -119,6 +119,8 @@ struct dentry {
85162 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
85163 };
85164
85165 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
85166 +
85167 /*
85168 * dentry->d_lock spinlock nesting subclasses:
85169 *
85170 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
85171 index 3e9bd6a..f4e1aa0 100644
85172 --- a/include/linux/decompress/mm.h
85173 +++ b/include/linux/decompress/mm.h
85174 @@ -78,7 +78,7 @@ static void free(void *where)
85175 * warnings when not needed (indeed large_malloc / large_free are not
85176 * needed by inflate */
85177
85178 -#define malloc(a) kmalloc(a, GFP_KERNEL)
85179 +#define malloc(a) kmalloc((a), GFP_KERNEL)
85180 #define free(a) kfree(a)
85181
85182 #define large_malloc(a) vmalloc(a)
85183 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
85184 index 91b7618..92a93d32 100644
85185 --- a/include/linux/dma-mapping.h
85186 +++ b/include/linux/dma-mapping.h
85187 @@ -16,51 +16,51 @@ enum dma_data_direction {
85188 };
85189
85190 struct dma_map_ops {
85191 - void* (*alloc_coherent)(struct device *dev, size_t size,
85192 + void* (* const alloc_coherent)(struct device *dev, size_t size,
85193 dma_addr_t *dma_handle, gfp_t gfp);
85194 - void (*free_coherent)(struct device *dev, size_t size,
85195 + void (* const free_coherent)(struct device *dev, size_t size,
85196 void *vaddr, dma_addr_t dma_handle);
85197 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
85198 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
85199 unsigned long offset, size_t size,
85200 enum dma_data_direction dir,
85201 struct dma_attrs *attrs);
85202 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
85203 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
85204 size_t size, enum dma_data_direction dir,
85205 struct dma_attrs *attrs);
85206 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
85207 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
85208 int nents, enum dma_data_direction dir,
85209 struct dma_attrs *attrs);
85210 - void (*unmap_sg)(struct device *dev,
85211 + void (* const unmap_sg)(struct device *dev,
85212 struct scatterlist *sg, int nents,
85213 enum dma_data_direction dir,
85214 struct dma_attrs *attrs);
85215 - void (*sync_single_for_cpu)(struct device *dev,
85216 + void (* const sync_single_for_cpu)(struct device *dev,
85217 dma_addr_t dma_handle, size_t size,
85218 enum dma_data_direction dir);
85219 - void (*sync_single_for_device)(struct device *dev,
85220 + void (* const sync_single_for_device)(struct device *dev,
85221 dma_addr_t dma_handle, size_t size,
85222 enum dma_data_direction dir);
85223 - void (*sync_single_range_for_cpu)(struct device *dev,
85224 + void (* const sync_single_range_for_cpu)(struct device *dev,
85225 dma_addr_t dma_handle,
85226 unsigned long offset,
85227 size_t size,
85228 enum dma_data_direction dir);
85229 - void (*sync_single_range_for_device)(struct device *dev,
85230 + void (* const sync_single_range_for_device)(struct device *dev,
85231 dma_addr_t dma_handle,
85232 unsigned long offset,
85233 size_t size,
85234 enum dma_data_direction dir);
85235 - void (*sync_sg_for_cpu)(struct device *dev,
85236 + void (* const sync_sg_for_cpu)(struct device *dev,
85237 struct scatterlist *sg, int nents,
85238 enum dma_data_direction dir);
85239 - void (*sync_sg_for_device)(struct device *dev,
85240 + void (* const sync_sg_for_device)(struct device *dev,
85241 struct scatterlist *sg, int nents,
85242 enum dma_data_direction dir);
85243 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
85244 - int (*dma_supported)(struct device *dev, u64 mask);
85245 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
85246 + int (* const dma_supported)(struct device *dev, u64 mask);
85247 int (*set_dma_mask)(struct device *dev, u64 mask);
85248 int is_phys;
85249 -};
85250 +} __do_const;
85251
85252 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
85253
85254 diff --git a/include/linux/dst.h b/include/linux/dst.h
85255 index e26fed8..b976d9f 100644
85256 --- a/include/linux/dst.h
85257 +++ b/include/linux/dst.h
85258 @@ -380,7 +380,7 @@ struct dst_node
85259 struct thread_pool *pool;
85260
85261 /* Transaction IDs live here */
85262 - atomic_long_t gen;
85263 + atomic_long_unchecked_t gen;
85264
85265 /*
85266 * How frequently and how many times transaction
85267 diff --git a/include/linux/elf.h b/include/linux/elf.h
85268 index 90a4ed0..d652617 100644
85269 --- a/include/linux/elf.h
85270 +++ b/include/linux/elf.h
85271 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
85272 #define PT_GNU_EH_FRAME 0x6474e550
85273
85274 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
85275 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
85276 +
85277 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
85278 +
85279 +/* Constants for the e_flags field */
85280 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
85281 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
85282 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
85283 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
85284 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
85285 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
85286
85287 /* These constants define the different elf file types */
85288 #define ET_NONE 0
85289 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
85290 #define DT_DEBUG 21
85291 #define DT_TEXTREL 22
85292 #define DT_JMPREL 23
85293 +#define DT_FLAGS 30
85294 + #define DF_TEXTREL 0x00000004
85295 #define DT_ENCODING 32
85296 #define OLD_DT_LOOS 0x60000000
85297 #define DT_LOOS 0x6000000d
85298 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
85299 #define PF_W 0x2
85300 #define PF_X 0x1
85301
85302 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
85303 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
85304 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
85305 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
85306 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
85307 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
85308 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
85309 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
85310 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
85311 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
85312 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
85313 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
85314 +
85315 typedef struct elf32_phdr{
85316 Elf32_Word p_type;
85317 Elf32_Off p_offset;
85318 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
85319 #define EI_OSABI 7
85320 #define EI_PAD 8
85321
85322 +#define EI_PAX 14
85323 +
85324 #define ELFMAG0 0x7f /* EI_MAG */
85325 #define ELFMAG1 'E'
85326 #define ELFMAG2 'L'
85327 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
85328 #define elf_phdr elf32_phdr
85329 #define elf_note elf32_note
85330 #define elf_addr_t Elf32_Off
85331 +#define elf_dyn Elf32_Dyn
85332
85333 #else
85334
85335 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
85336 #define elf_phdr elf64_phdr
85337 #define elf_note elf64_note
85338 #define elf_addr_t Elf64_Off
85339 +#define elf_dyn Elf64_Dyn
85340
85341 #endif
85342
85343 diff --git a/include/linux/fs.h b/include/linux/fs.h
85344 index 1b9a47a..6fe2934 100644
85345 --- a/include/linux/fs.h
85346 +++ b/include/linux/fs.h
85347 @@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
85348 unsigned long, unsigned long);
85349
85350 struct address_space_operations {
85351 - int (*writepage)(struct page *page, struct writeback_control *wbc);
85352 - int (*readpage)(struct file *, struct page *);
85353 - void (*sync_page)(struct page *);
85354 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
85355 + int (* const readpage)(struct file *, struct page *);
85356 + void (* const sync_page)(struct page *);
85357
85358 /* Write back some dirty pages from this mapping. */
85359 - int (*writepages)(struct address_space *, struct writeback_control *);
85360 + int (* const writepages)(struct address_space *, struct writeback_control *);
85361
85362 /* Set a page dirty. Return true if this dirtied it */
85363 - int (*set_page_dirty)(struct page *page);
85364 + int (* const set_page_dirty)(struct page *page);
85365
85366 - int (*readpages)(struct file *filp, struct address_space *mapping,
85367 + int (* const readpages)(struct file *filp, struct address_space *mapping,
85368 struct list_head *pages, unsigned nr_pages);
85369
85370 - int (*write_begin)(struct file *, struct address_space *mapping,
85371 + int (* const write_begin)(struct file *, struct address_space *mapping,
85372 loff_t pos, unsigned len, unsigned flags,
85373 struct page **pagep, void **fsdata);
85374 - int (*write_end)(struct file *, struct address_space *mapping,
85375 + int (* const write_end)(struct file *, struct address_space *mapping,
85376 loff_t pos, unsigned len, unsigned copied,
85377 struct page *page, void *fsdata);
85378
85379 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
85380 - sector_t (*bmap)(struct address_space *, sector_t);
85381 - void (*invalidatepage) (struct page *, unsigned long);
85382 - int (*releasepage) (struct page *, gfp_t);
85383 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
85384 + sector_t (* const bmap)(struct address_space *, sector_t);
85385 + void (* const invalidatepage) (struct page *, unsigned long);
85386 + int (* const releasepage) (struct page *, gfp_t);
85387 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
85388 loff_t offset, unsigned long nr_segs);
85389 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
85390 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
85391 void **, unsigned long *);
85392 /* migrate the contents of a page to the specified target */
85393 - int (*migratepage) (struct address_space *,
85394 + int (* const migratepage) (struct address_space *,
85395 struct page *, struct page *);
85396 - int (*launder_page) (struct page *);
85397 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
85398 + int (* const launder_page) (struct page *);
85399 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
85400 unsigned long);
85401 - int (*error_remove_page)(struct address_space *, struct page *);
85402 + int (* const error_remove_page)(struct address_space *, struct page *);
85403 };
85404
85405 /*
85406 @@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
85407 typedef struct files_struct *fl_owner_t;
85408
85409 struct file_lock_operations {
85410 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
85411 - void (*fl_release_private)(struct file_lock *);
85412 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
85413 + void (* const fl_release_private)(struct file_lock *);
85414 };
85415
85416 struct lock_manager_operations {
85417 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
85418 - void (*fl_notify)(struct file_lock *); /* unblock callback */
85419 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
85420 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
85421 - void (*fl_release_private)(struct file_lock *);
85422 - void (*fl_break)(struct file_lock *);
85423 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
85424 - int (*fl_change)(struct file_lock **, int);
85425 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
85426 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
85427 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
85428 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
85429 + void (* const fl_release_private)(struct file_lock *);
85430 + void (* const fl_break)(struct file_lock *);
85431 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
85432 + int (* const fl_change)(struct file_lock **, int);
85433 };
85434
85435 struct lock_manager {
85436 @@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
85437 unsigned int fi_flags; /* Flags as passed from user */
85438 unsigned int fi_extents_mapped; /* Number of mapped extents */
85439 unsigned int fi_extents_max; /* Size of fiemap_extent array */
85440 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
85441 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
85442 * array */
85443 };
85444 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
85445 @@ -1512,7 +1512,8 @@ struct file_operations {
85446 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
85447 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
85448 int (*setlease)(struct file *, long, struct file_lock **);
85449 -};
85450 +} __do_const;
85451 +typedef struct file_operations __no_const file_operations_no_const;
85452
85453 struct inode_operations {
85454 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
85455 @@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
85456 unsigned long, loff_t *);
85457
85458 struct super_operations {
85459 - struct inode *(*alloc_inode)(struct super_block *sb);
85460 - void (*destroy_inode)(struct inode *);
85461 + struct inode *(* const alloc_inode)(struct super_block *sb);
85462 + void (* const destroy_inode)(struct inode *);
85463
85464 - void (*dirty_inode) (struct inode *);
85465 - int (*write_inode) (struct inode *, int);
85466 - void (*drop_inode) (struct inode *);
85467 - void (*delete_inode) (struct inode *);
85468 - void (*put_super) (struct super_block *);
85469 - void (*write_super) (struct super_block *);
85470 - int (*sync_fs)(struct super_block *sb, int wait);
85471 - int (*freeze_fs) (struct super_block *);
85472 - int (*unfreeze_fs) (struct super_block *);
85473 - int (*statfs) (struct dentry *, struct kstatfs *);
85474 - int (*remount_fs) (struct super_block *, int *, char *);
85475 - void (*clear_inode) (struct inode *);
85476 - void (*umount_begin) (struct super_block *);
85477 + void (* const dirty_inode) (struct inode *);
85478 + int (* const write_inode) (struct inode *, int);
85479 + void (* const drop_inode) (struct inode *);
85480 + void (* const delete_inode) (struct inode *);
85481 + void (* const put_super) (struct super_block *);
85482 + void (* const write_super) (struct super_block *);
85483 + int (* const sync_fs)(struct super_block *sb, int wait);
85484 + int (* const freeze_fs) (struct super_block *);
85485 + int (* const unfreeze_fs) (struct super_block *);
85486 + int (* const statfs) (struct dentry *, struct kstatfs *);
85487 + int (* const remount_fs) (struct super_block *, int *, char *);
85488 + void (* const clear_inode) (struct inode *);
85489 + void (* const umount_begin) (struct super_block *);
85490
85491 - int (*show_options)(struct seq_file *, struct vfsmount *);
85492 - int (*show_stats)(struct seq_file *, struct vfsmount *);
85493 + int (* const show_options)(struct seq_file *, struct vfsmount *);
85494 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
85495 #ifdef CONFIG_QUOTA
85496 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
85497 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
85498 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
85499 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
85500 #endif
85501 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
85502 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
85503 };
85504
85505 /*
85506 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
85507 index 78a05bf..2a7d3e1 100644
85508 --- a/include/linux/fs_struct.h
85509 +++ b/include/linux/fs_struct.h
85510 @@ -4,7 +4,7 @@
85511 #include <linux/path.h>
85512
85513 struct fs_struct {
85514 - int users;
85515 + atomic_t users;
85516 rwlock_t lock;
85517 int umask;
85518 int in_exec;
85519 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
85520 index 7be0c6f..2f63a2b 100644
85521 --- a/include/linux/fscache-cache.h
85522 +++ b/include/linux/fscache-cache.h
85523 @@ -116,7 +116,7 @@ struct fscache_operation {
85524 #endif
85525 };
85526
85527 -extern atomic_t fscache_op_debug_id;
85528 +extern atomic_unchecked_t fscache_op_debug_id;
85529 extern const struct slow_work_ops fscache_op_slow_work_ops;
85530
85531 extern void fscache_enqueue_operation(struct fscache_operation *);
85532 @@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
85533 fscache_operation_release_t release)
85534 {
85535 atomic_set(&op->usage, 1);
85536 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
85537 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
85538 op->release = release;
85539 INIT_LIST_HEAD(&op->pend_link);
85540 fscache_set_op_state(op, "Init");
85541 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
85542 index 4d6f47b..00bcedb 100644
85543 --- a/include/linux/fsnotify_backend.h
85544 +++ b/include/linux/fsnotify_backend.h
85545 @@ -86,6 +86,7 @@ struct fsnotify_ops {
85546 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
85547 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
85548 };
85549 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
85550
85551 /*
85552 * A group is a "thing" that wants to receive notification about filesystem
85553 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
85554 index 4ec5e67..42f1eb9 100644
85555 --- a/include/linux/ftrace_event.h
85556 +++ b/include/linux/ftrace_event.h
85557 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
85558 int filter_type);
85559 extern int trace_define_common_fields(struct ftrace_event_call *call);
85560
85561 -#define is_signed_type(type) (((type)(-1)) < 0)
85562 +#define is_signed_type(type) (((type)(-1)) < (type)1)
85563
85564 int trace_set_clr_event(const char *system, const char *event, int set);
85565
85566 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
85567 index 297df45..b6a74ff 100644
85568 --- a/include/linux/genhd.h
85569 +++ b/include/linux/genhd.h
85570 @@ -161,7 +161,7 @@ struct gendisk {
85571
85572 struct timer_rand_state *random;
85573
85574 - atomic_t sync_io; /* RAID */
85575 + atomic_unchecked_t sync_io; /* RAID */
85576 struct work_struct async_notify;
85577 #ifdef CONFIG_BLK_DEV_INTEGRITY
85578 struct blk_integrity *integrity;
85579 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
85580 new file mode 100644
85581 index 0000000..af663cf
85582 --- /dev/null
85583 +++ b/include/linux/gracl.h
85584 @@ -0,0 +1,319 @@
85585 +#ifndef GR_ACL_H
85586 +#define GR_ACL_H
85587 +
85588 +#include <linux/grdefs.h>
85589 +#include <linux/resource.h>
85590 +#include <linux/capability.h>
85591 +#include <linux/dcache.h>
85592 +#include <asm/resource.h>
85593 +
85594 +/* Major status information */
85595 +
85596 +#define GR_VERSION "grsecurity 2.9"
85597 +#define GRSECURITY_VERSION 0x2900
85598 +
85599 +enum {
85600 + GR_SHUTDOWN = 0,
85601 + GR_ENABLE = 1,
85602 + GR_SPROLE = 2,
85603 + GR_RELOAD = 3,
85604 + GR_SEGVMOD = 4,
85605 + GR_STATUS = 5,
85606 + GR_UNSPROLE = 6,
85607 + GR_PASSSET = 7,
85608 + GR_SPROLEPAM = 8,
85609 +};
85610 +
85611 +/* Password setup definitions
85612 + * kernel/grhash.c */
85613 +enum {
85614 + GR_PW_LEN = 128,
85615 + GR_SALT_LEN = 16,
85616 + GR_SHA_LEN = 32,
85617 +};
85618 +
85619 +enum {
85620 + GR_SPROLE_LEN = 64,
85621 +};
85622 +
85623 +enum {
85624 + GR_NO_GLOB = 0,
85625 + GR_REG_GLOB,
85626 + GR_CREATE_GLOB
85627 +};
85628 +
85629 +#define GR_NLIMITS 32
85630 +
85631 +/* Begin Data Structures */
85632 +
85633 +struct sprole_pw {
85634 + unsigned char *rolename;
85635 + unsigned char salt[GR_SALT_LEN];
85636 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
85637 +};
85638 +
85639 +struct name_entry {
85640 + __u32 key;
85641 + ino_t inode;
85642 + dev_t device;
85643 + char *name;
85644 + __u16 len;
85645 + __u8 deleted;
85646 + struct name_entry *prev;
85647 + struct name_entry *next;
85648 +};
85649 +
85650 +struct inodev_entry {
85651 + struct name_entry *nentry;
85652 + struct inodev_entry *prev;
85653 + struct inodev_entry *next;
85654 +};
85655 +
85656 +struct acl_role_db {
85657 + struct acl_role_label **r_hash;
85658 + __u32 r_size;
85659 +};
85660 +
85661 +struct inodev_db {
85662 + struct inodev_entry **i_hash;
85663 + __u32 i_size;
85664 +};
85665 +
85666 +struct name_db {
85667 + struct name_entry **n_hash;
85668 + __u32 n_size;
85669 +};
85670 +
85671 +struct crash_uid {
85672 + uid_t uid;
85673 + unsigned long expires;
85674 +};
85675 +
85676 +struct gr_hash_struct {
85677 + void **table;
85678 + void **nametable;
85679 + void *first;
85680 + __u32 table_size;
85681 + __u32 used_size;
85682 + int type;
85683 +};
85684 +
85685 +/* Userspace Grsecurity ACL data structures */
85686 +
85687 +struct acl_subject_label {
85688 + char *filename;
85689 + ino_t inode;
85690 + dev_t device;
85691 + __u32 mode;
85692 + kernel_cap_t cap_mask;
85693 + kernel_cap_t cap_lower;
85694 + kernel_cap_t cap_invert_audit;
85695 +
85696 + struct rlimit res[GR_NLIMITS];
85697 + __u32 resmask;
85698 +
85699 + __u8 user_trans_type;
85700 + __u8 group_trans_type;
85701 + uid_t *user_transitions;
85702 + gid_t *group_transitions;
85703 + __u16 user_trans_num;
85704 + __u16 group_trans_num;
85705 +
85706 + __u32 sock_families[2];
85707 + __u32 ip_proto[8];
85708 + __u32 ip_type;
85709 + struct acl_ip_label **ips;
85710 + __u32 ip_num;
85711 + __u32 inaddr_any_override;
85712 +
85713 + __u32 crashes;
85714 + unsigned long expires;
85715 +
85716 + struct acl_subject_label *parent_subject;
85717 + struct gr_hash_struct *hash;
85718 + struct acl_subject_label *prev;
85719 + struct acl_subject_label *next;
85720 +
85721 + struct acl_object_label **obj_hash;
85722 + __u32 obj_hash_size;
85723 + __u16 pax_flags;
85724 +};
85725 +
85726 +struct role_allowed_ip {
85727 + __u32 addr;
85728 + __u32 netmask;
85729 +
85730 + struct role_allowed_ip *prev;
85731 + struct role_allowed_ip *next;
85732 +};
85733 +
85734 +struct role_transition {
85735 + char *rolename;
85736 +
85737 + struct role_transition *prev;
85738 + struct role_transition *next;
85739 +};
85740 +
85741 +struct acl_role_label {
85742 + char *rolename;
85743 + uid_t uidgid;
85744 + __u16 roletype;
85745 +
85746 + __u16 auth_attempts;
85747 + unsigned long expires;
85748 +
85749 + struct acl_subject_label *root_label;
85750 + struct gr_hash_struct *hash;
85751 +
85752 + struct acl_role_label *prev;
85753 + struct acl_role_label *next;
85754 +
85755 + struct role_transition *transitions;
85756 + struct role_allowed_ip *allowed_ips;
85757 + uid_t *domain_children;
85758 + __u16 domain_child_num;
85759 +
85760 + mode_t umask;
85761 +
85762 + struct acl_subject_label **subj_hash;
85763 + __u32 subj_hash_size;
85764 +};
85765 +
85766 +struct user_acl_role_db {
85767 + struct acl_role_label **r_table;
85768 + __u32 num_pointers; /* Number of allocations to track */
85769 + __u32 num_roles; /* Number of roles */
85770 + __u32 num_domain_children; /* Number of domain children */
85771 + __u32 num_subjects; /* Number of subjects */
85772 + __u32 num_objects; /* Number of objects */
85773 +};
85774 +
85775 +struct acl_object_label {
85776 + char *filename;
85777 + ino_t inode;
85778 + dev_t device;
85779 + __u32 mode;
85780 +
85781 + struct acl_subject_label *nested;
85782 + struct acl_object_label *globbed;
85783 +
85784 + /* next two structures not used */
85785 +
85786 + struct acl_object_label *prev;
85787 + struct acl_object_label *next;
85788 +};
85789 +
85790 +struct acl_ip_label {
85791 + char *iface;
85792 + __u32 addr;
85793 + __u32 netmask;
85794 + __u16 low, high;
85795 + __u8 mode;
85796 + __u32 type;
85797 + __u32 proto[8];
85798 +
85799 + /* next two structures not used */
85800 +
85801 + struct acl_ip_label *prev;
85802 + struct acl_ip_label *next;
85803 +};
85804 +
85805 +struct gr_arg {
85806 + struct user_acl_role_db role_db;
85807 + unsigned char pw[GR_PW_LEN];
85808 + unsigned char salt[GR_SALT_LEN];
85809 + unsigned char sum[GR_SHA_LEN];
85810 + unsigned char sp_role[GR_SPROLE_LEN];
85811 + struct sprole_pw *sprole_pws;
85812 + dev_t segv_device;
85813 + ino_t segv_inode;
85814 + uid_t segv_uid;
85815 + __u16 num_sprole_pws;
85816 + __u16 mode;
85817 +};
85818 +
85819 +struct gr_arg_wrapper {
85820 + struct gr_arg *arg;
85821 + __u32 version;
85822 + __u32 size;
85823 +};
85824 +
85825 +struct subject_map {
85826 + struct acl_subject_label *user;
85827 + struct acl_subject_label *kernel;
85828 + struct subject_map *prev;
85829 + struct subject_map *next;
85830 +};
85831 +
85832 +struct acl_subj_map_db {
85833 + struct subject_map **s_hash;
85834 + __u32 s_size;
85835 +};
85836 +
85837 +/* End Data Structures Section */
85838 +
85839 +/* Hash functions generated by empirical testing by Brad Spengler
85840 + Makes good use of the low bits of the inode. Generally 0-1 times
85841 + in loop for successful match. 0-3 for unsuccessful match.
85842 + Shift/add algorithm with modulus of table size and an XOR*/
85843 +
85844 +static __inline__ unsigned int
85845 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
85846 +{
85847 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
85848 +}
85849 +
85850 + static __inline__ unsigned int
85851 +shash(const struct acl_subject_label *userp, const unsigned int sz)
85852 +{
85853 + return ((const unsigned long)userp % sz);
85854 +}
85855 +
85856 +static __inline__ unsigned int
85857 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
85858 +{
85859 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
85860 +}
85861 +
85862 +static __inline__ unsigned int
85863 +nhash(const char *name, const __u16 len, const unsigned int sz)
85864 +{
85865 + return full_name_hash((const unsigned char *)name, len) % sz;
85866 +}
85867 +
85868 +#define FOR_EACH_ROLE_START(role) \
85869 + role = role_list; \
85870 + while (role) {
85871 +
85872 +#define FOR_EACH_ROLE_END(role) \
85873 + role = role->prev; \
85874 + }
85875 +
85876 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
85877 + subj = NULL; \
85878 + iter = 0; \
85879 + while (iter < role->subj_hash_size) { \
85880 + if (subj == NULL) \
85881 + subj = role->subj_hash[iter]; \
85882 + if (subj == NULL) { \
85883 + iter++; \
85884 + continue; \
85885 + }
85886 +
85887 +#define FOR_EACH_SUBJECT_END(subj,iter) \
85888 + subj = subj->next; \
85889 + if (subj == NULL) \
85890 + iter++; \
85891 + }
85892 +
85893 +
85894 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
85895 + subj = role->hash->first; \
85896 + while (subj != NULL) {
85897 +
85898 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
85899 + subj = subj->next; \
85900 + }
85901 +
85902 +#endif
85903 +
85904 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
85905 new file mode 100644
85906 index 0000000..323ecf2
85907 --- /dev/null
85908 +++ b/include/linux/gralloc.h
85909 @@ -0,0 +1,9 @@
85910 +#ifndef __GRALLOC_H
85911 +#define __GRALLOC_H
85912 +
85913 +void acl_free_all(void);
85914 +int acl_alloc_stack_init(unsigned long size);
85915 +void *acl_alloc(unsigned long len);
85916 +void *acl_alloc_num(unsigned long num, unsigned long len);
85917 +
85918 +#endif
85919 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
85920 new file mode 100644
85921 index 0000000..70d6cd5
85922 --- /dev/null
85923 +++ b/include/linux/grdefs.h
85924 @@ -0,0 +1,140 @@
85925 +#ifndef GRDEFS_H
85926 +#define GRDEFS_H
85927 +
85928 +/* Begin grsecurity status declarations */
85929 +
85930 +enum {
85931 + GR_READY = 0x01,
85932 + GR_STATUS_INIT = 0x00 // disabled state
85933 +};
85934 +
85935 +/* Begin ACL declarations */
85936 +
85937 +/* Role flags */
85938 +
85939 +enum {
85940 + GR_ROLE_USER = 0x0001,
85941 + GR_ROLE_GROUP = 0x0002,
85942 + GR_ROLE_DEFAULT = 0x0004,
85943 + GR_ROLE_SPECIAL = 0x0008,
85944 + GR_ROLE_AUTH = 0x0010,
85945 + GR_ROLE_NOPW = 0x0020,
85946 + GR_ROLE_GOD = 0x0040,
85947 + GR_ROLE_LEARN = 0x0080,
85948 + GR_ROLE_TPE = 0x0100,
85949 + GR_ROLE_DOMAIN = 0x0200,
85950 + GR_ROLE_PAM = 0x0400,
85951 + GR_ROLE_PERSIST = 0x800
85952 +};
85953 +
85954 +/* ACL Subject and Object mode flags */
85955 +enum {
85956 + GR_DELETED = 0x80000000
85957 +};
85958 +
85959 +/* ACL Object-only mode flags */
85960 +enum {
85961 + GR_READ = 0x00000001,
85962 + GR_APPEND = 0x00000002,
85963 + GR_WRITE = 0x00000004,
85964 + GR_EXEC = 0x00000008,
85965 + GR_FIND = 0x00000010,
85966 + GR_INHERIT = 0x00000020,
85967 + GR_SETID = 0x00000040,
85968 + GR_CREATE = 0x00000080,
85969 + GR_DELETE = 0x00000100,
85970 + GR_LINK = 0x00000200,
85971 + GR_AUDIT_READ = 0x00000400,
85972 + GR_AUDIT_APPEND = 0x00000800,
85973 + GR_AUDIT_WRITE = 0x00001000,
85974 + GR_AUDIT_EXEC = 0x00002000,
85975 + GR_AUDIT_FIND = 0x00004000,
85976 + GR_AUDIT_INHERIT= 0x00008000,
85977 + GR_AUDIT_SETID = 0x00010000,
85978 + GR_AUDIT_CREATE = 0x00020000,
85979 + GR_AUDIT_DELETE = 0x00040000,
85980 + GR_AUDIT_LINK = 0x00080000,
85981 + GR_PTRACERD = 0x00100000,
85982 + GR_NOPTRACE = 0x00200000,
85983 + GR_SUPPRESS = 0x00400000,
85984 + GR_NOLEARN = 0x00800000,
85985 + GR_INIT_TRANSFER= 0x01000000
85986 +};
85987 +
85988 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
85989 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
85990 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
85991 +
85992 +/* ACL subject-only mode flags */
85993 +enum {
85994 + GR_KILL = 0x00000001,
85995 + GR_VIEW = 0x00000002,
85996 + GR_PROTECTED = 0x00000004,
85997 + GR_LEARN = 0x00000008,
85998 + GR_OVERRIDE = 0x00000010,
85999 + /* just a placeholder, this mode is only used in userspace */
86000 + GR_DUMMY = 0x00000020,
86001 + GR_PROTSHM = 0x00000040,
86002 + GR_KILLPROC = 0x00000080,
86003 + GR_KILLIPPROC = 0x00000100,
86004 + /* just a placeholder, this mode is only used in userspace */
86005 + GR_NOTROJAN = 0x00000200,
86006 + GR_PROTPROCFD = 0x00000400,
86007 + GR_PROCACCT = 0x00000800,
86008 + GR_RELAXPTRACE = 0x00001000,
86009 + GR_NESTED = 0x00002000,
86010 + GR_INHERITLEARN = 0x00004000,
86011 + GR_PROCFIND = 0x00008000,
86012 + GR_POVERRIDE = 0x00010000,
86013 + GR_KERNELAUTH = 0x00020000,
86014 + GR_ATSECURE = 0x00040000,
86015 + GR_SHMEXEC = 0x00080000
86016 +};
86017 +
86018 +enum {
86019 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
86020 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
86021 + GR_PAX_ENABLE_MPROTECT = 0x0004,
86022 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
86023 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
86024 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
86025 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
86026 + GR_PAX_DISABLE_MPROTECT = 0x0400,
86027 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
86028 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
86029 +};
86030 +
86031 +enum {
86032 + GR_ID_USER = 0x01,
86033 + GR_ID_GROUP = 0x02,
86034 +};
86035 +
86036 +enum {
86037 + GR_ID_ALLOW = 0x01,
86038 + GR_ID_DENY = 0x02,
86039 +};
86040 +
86041 +#define GR_CRASH_RES 31
86042 +#define GR_UIDTABLE_MAX 500
86043 +
86044 +/* begin resource learning section */
86045 +enum {
86046 + GR_RLIM_CPU_BUMP = 60,
86047 + GR_RLIM_FSIZE_BUMP = 50000,
86048 + GR_RLIM_DATA_BUMP = 10000,
86049 + GR_RLIM_STACK_BUMP = 1000,
86050 + GR_RLIM_CORE_BUMP = 10000,
86051 + GR_RLIM_RSS_BUMP = 500000,
86052 + GR_RLIM_NPROC_BUMP = 1,
86053 + GR_RLIM_NOFILE_BUMP = 5,
86054 + GR_RLIM_MEMLOCK_BUMP = 50000,
86055 + GR_RLIM_AS_BUMP = 500000,
86056 + GR_RLIM_LOCKS_BUMP = 2,
86057 + GR_RLIM_SIGPENDING_BUMP = 5,
86058 + GR_RLIM_MSGQUEUE_BUMP = 10000,
86059 + GR_RLIM_NICE_BUMP = 1,
86060 + GR_RLIM_RTPRIO_BUMP = 1,
86061 + GR_RLIM_RTTIME_BUMP = 1000000
86062 +};
86063 +
86064 +#endif
86065 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
86066 new file mode 100644
86067 index 0000000..3826b91
86068 --- /dev/null
86069 +++ b/include/linux/grinternal.h
86070 @@ -0,0 +1,219 @@
86071 +#ifndef __GRINTERNAL_H
86072 +#define __GRINTERNAL_H
86073 +
86074 +#ifdef CONFIG_GRKERNSEC
86075 +
86076 +#include <linux/fs.h>
86077 +#include <linux/mnt_namespace.h>
86078 +#include <linux/nsproxy.h>
86079 +#include <linux/gracl.h>
86080 +#include <linux/grdefs.h>
86081 +#include <linux/grmsg.h>
86082 +
86083 +void gr_add_learn_entry(const char *fmt, ...)
86084 + __attribute__ ((format (printf, 1, 2)));
86085 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
86086 + const struct vfsmount *mnt);
86087 +__u32 gr_check_create(const struct dentry *new_dentry,
86088 + const struct dentry *parent,
86089 + const struct vfsmount *mnt, const __u32 mode);
86090 +int gr_check_protected_task(const struct task_struct *task);
86091 +__u32 to_gr_audit(const __u32 reqmode);
86092 +int gr_set_acls(const int type);
86093 +int gr_apply_subject_to_task(struct task_struct *task);
86094 +int gr_acl_is_enabled(void);
86095 +char gr_roletype_to_char(void);
86096 +
86097 +void gr_handle_alertkill(struct task_struct *task);
86098 +char *gr_to_filename(const struct dentry *dentry,
86099 + const struct vfsmount *mnt);
86100 +char *gr_to_filename1(const struct dentry *dentry,
86101 + const struct vfsmount *mnt);
86102 +char *gr_to_filename2(const struct dentry *dentry,
86103 + const struct vfsmount *mnt);
86104 +char *gr_to_filename3(const struct dentry *dentry,
86105 + const struct vfsmount *mnt);
86106 +
86107 +extern int grsec_enable_ptrace_readexec;
86108 +extern int grsec_enable_harden_ptrace;
86109 +extern int grsec_enable_link;
86110 +extern int grsec_enable_fifo;
86111 +extern int grsec_enable_shm;
86112 +extern int grsec_enable_execlog;
86113 +extern int grsec_enable_signal;
86114 +extern int grsec_enable_audit_ptrace;
86115 +extern int grsec_enable_forkfail;
86116 +extern int grsec_enable_time;
86117 +extern int grsec_enable_rofs;
86118 +extern int grsec_enable_chroot_shmat;
86119 +extern int grsec_enable_chroot_mount;
86120 +extern int grsec_enable_chroot_double;
86121 +extern int grsec_enable_chroot_pivot;
86122 +extern int grsec_enable_chroot_chdir;
86123 +extern int grsec_enable_chroot_chmod;
86124 +extern int grsec_enable_chroot_mknod;
86125 +extern int grsec_enable_chroot_fchdir;
86126 +extern int grsec_enable_chroot_nice;
86127 +extern int grsec_enable_chroot_execlog;
86128 +extern int grsec_enable_chroot_caps;
86129 +extern int grsec_enable_chroot_sysctl;
86130 +extern int grsec_enable_chroot_unix;
86131 +extern int grsec_enable_tpe;
86132 +extern int grsec_tpe_gid;
86133 +extern int grsec_enable_tpe_all;
86134 +extern int grsec_enable_tpe_invert;
86135 +extern int grsec_enable_socket_all;
86136 +extern int grsec_socket_all_gid;
86137 +extern int grsec_enable_socket_client;
86138 +extern int grsec_socket_client_gid;
86139 +extern int grsec_enable_socket_server;
86140 +extern int grsec_socket_server_gid;
86141 +extern int grsec_audit_gid;
86142 +extern int grsec_enable_group;
86143 +extern int grsec_enable_audit_textrel;
86144 +extern int grsec_enable_log_rwxmaps;
86145 +extern int grsec_enable_mount;
86146 +extern int grsec_enable_chdir;
86147 +extern int grsec_resource_logging;
86148 +extern int grsec_enable_blackhole;
86149 +extern int grsec_lastack_retries;
86150 +extern int grsec_enable_brute;
86151 +extern int grsec_lock;
86152 +
86153 +extern spinlock_t grsec_alert_lock;
86154 +extern unsigned long grsec_alert_wtime;
86155 +extern unsigned long grsec_alert_fyet;
86156 +
86157 +extern spinlock_t grsec_audit_lock;
86158 +
86159 +extern rwlock_t grsec_exec_file_lock;
86160 +
86161 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
86162 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
86163 + (tsk)->exec_file->f_vfsmnt) : "/")
86164 +
86165 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
86166 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
86167 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
86168 +
86169 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
86170 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
86171 + (tsk)->exec_file->f_vfsmnt) : "/")
86172 +
86173 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
86174 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
86175 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
86176 +
86177 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
86178 +
86179 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
86180 +
86181 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
86182 + (task)->pid, (cred)->uid, \
86183 + (cred)->euid, (cred)->gid, (cred)->egid, \
86184 + gr_parent_task_fullpath(task), \
86185 + (task)->real_parent->comm, (task)->real_parent->pid, \
86186 + (pcred)->uid, (pcred)->euid, \
86187 + (pcred)->gid, (pcred)->egid
86188 +
86189 +#define GR_CHROOT_CAPS {{ \
86190 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
86191 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
86192 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
86193 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
86194 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
86195 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
86196 + CAP_TO_MASK(CAP_MAC_ADMIN) }}
86197 +
86198 +#define security_learn(normal_msg,args...) \
86199 +({ \
86200 + read_lock(&grsec_exec_file_lock); \
86201 + gr_add_learn_entry(normal_msg "\n", ## args); \
86202 + read_unlock(&grsec_exec_file_lock); \
86203 +})
86204 +
86205 +enum {
86206 + GR_DO_AUDIT,
86207 + GR_DONT_AUDIT,
86208 + GR_DONT_AUDIT_GOOD
86209 +};
86210 +
86211 +enum {
86212 + GR_TTYSNIFF,
86213 + GR_RBAC,
86214 + GR_RBAC_STR,
86215 + GR_STR_RBAC,
86216 + GR_RBAC_MODE2,
86217 + GR_RBAC_MODE3,
86218 + GR_FILENAME,
86219 + GR_SYSCTL_HIDDEN,
86220 + GR_NOARGS,
86221 + GR_ONE_INT,
86222 + GR_ONE_INT_TWO_STR,
86223 + GR_ONE_STR,
86224 + GR_STR_INT,
86225 + GR_TWO_STR_INT,
86226 + GR_TWO_INT,
86227 + GR_TWO_U64,
86228 + GR_THREE_INT,
86229 + GR_FIVE_INT_TWO_STR,
86230 + GR_TWO_STR,
86231 + GR_THREE_STR,
86232 + GR_FOUR_STR,
86233 + GR_STR_FILENAME,
86234 + GR_FILENAME_STR,
86235 + GR_FILENAME_TWO_INT,
86236 + GR_FILENAME_TWO_INT_STR,
86237 + GR_TEXTREL,
86238 + GR_PTRACE,
86239 + GR_RESOURCE,
86240 + GR_CAP,
86241 + GR_SIG,
86242 + GR_SIG2,
86243 + GR_CRASH1,
86244 + GR_CRASH2,
86245 + GR_PSACCT,
86246 + GR_RWXMAP
86247 +};
86248 +
86249 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
86250 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
86251 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
86252 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
86253 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
86254 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
86255 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
86256 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
86257 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
86258 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
86259 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
86260 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
86261 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
86262 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
86263 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
86264 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
86265 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
86266 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
86267 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
86268 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
86269 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
86270 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
86271 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
86272 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
86273 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
86274 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
86275 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
86276 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
86277 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
86278 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
86279 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
86280 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
86281 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
86282 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
86283 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
86284 +
86285 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
86286 +
86287 +#endif
86288 +
86289 +#endif
86290 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
86291 new file mode 100644
86292 index 0000000..f885406
86293 --- /dev/null
86294 +++ b/include/linux/grmsg.h
86295 @@ -0,0 +1,109 @@
86296 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
86297 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
86298 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
86299 +#define GR_STOPMOD_MSG "denied modification of module state by "
86300 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
86301 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
86302 +#define GR_IOPERM_MSG "denied use of ioperm() by "
86303 +#define GR_IOPL_MSG "denied use of iopl() by "
86304 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
86305 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
86306 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
86307 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
86308 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
86309 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
86310 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
86311 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
86312 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
86313 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
86314 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
86315 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
86316 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
86317 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
86318 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
86319 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
86320 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
86321 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
86322 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
86323 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
86324 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
86325 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
86326 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
86327 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
86328 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
86329 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
86330 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
86331 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
86332 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
86333 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
86334 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
86335 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
86336 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
86337 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
86338 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
86339 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
86340 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
86341 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
86342 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
86343 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
86344 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
86345 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
86346 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
86347 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
86348 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
86349 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
86350 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
86351 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
86352 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
86353 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
86354 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
86355 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
86356 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
86357 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
86358 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
86359 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
86360 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
86361 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
86362 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
86363 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
86364 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
86365 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
86366 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
86367 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
86368 +#define GR_NICE_CHROOT_MSG "denied priority change by "
86369 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
86370 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
86371 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
86372 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
86373 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
86374 +#define GR_TIME_MSG "time set by "
86375 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
86376 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
86377 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
86378 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
86379 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
86380 +#define GR_BIND_MSG "denied bind() by "
86381 +#define GR_CONNECT_MSG "denied connect() by "
86382 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
86383 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
86384 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
86385 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
86386 +#define GR_CAP_ACL_MSG "use of %s denied for "
86387 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
86388 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
86389 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
86390 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
86391 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
86392 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
86393 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
86394 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
86395 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
86396 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
86397 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
86398 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
86399 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
86400 +#define GR_VM86_MSG "denied use of vm86 by "
86401 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
86402 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
86403 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
86404 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
86405 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
86406 new file mode 100644
86407 index 0000000..c1793ae
86408 --- /dev/null
86409 +++ b/include/linux/grsecurity.h
86410 @@ -0,0 +1,219 @@
86411 +#ifndef GR_SECURITY_H
86412 +#define GR_SECURITY_H
86413 +#include <linux/fs.h>
86414 +#include <linux/fs_struct.h>
86415 +#include <linux/binfmts.h>
86416 +#include <linux/gracl.h>
86417 +#include <linux/compat.h>
86418 +
86419 +/* notify of brain-dead configs */
86420 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
86421 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
86422 +#endif
86423 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
86424 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
86425 +#endif
86426 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
86427 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
86428 +#endif
86429 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
86430 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
86431 +#endif
86432 +
86433 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
86434 +void gr_handle_brute_check(void);
86435 +void gr_handle_kernel_exploit(void);
86436 +int gr_process_user_ban(void);
86437 +
86438 +char gr_roletype_to_char(void);
86439 +
86440 +int gr_acl_enable_at_secure(void);
86441 +
86442 +int gr_check_user_change(int real, int effective, int fs);
86443 +int gr_check_group_change(int real, int effective, int fs);
86444 +
86445 +void gr_del_task_from_ip_table(struct task_struct *p);
86446 +
86447 +int gr_pid_is_chrooted(struct task_struct *p);
86448 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
86449 +int gr_handle_chroot_nice(void);
86450 +int gr_handle_chroot_sysctl(const int op);
86451 +int gr_handle_chroot_setpriority(struct task_struct *p,
86452 + const int niceval);
86453 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
86454 +int gr_handle_chroot_chroot(const struct dentry *dentry,
86455 + const struct vfsmount *mnt);
86456 +void gr_handle_chroot_chdir(struct path *path);
86457 +int gr_handle_chroot_chmod(const struct dentry *dentry,
86458 + const struct vfsmount *mnt, const int mode);
86459 +int gr_handle_chroot_mknod(const struct dentry *dentry,
86460 + const struct vfsmount *mnt, const int mode);
86461 +int gr_handle_chroot_mount(const struct dentry *dentry,
86462 + const struct vfsmount *mnt,
86463 + const char *dev_name);
86464 +int gr_handle_chroot_pivot(void);
86465 +int gr_handle_chroot_unix(const pid_t pid);
86466 +
86467 +int gr_handle_rawio(const struct inode *inode);
86468 +
86469 +void gr_handle_ioperm(void);
86470 +void gr_handle_iopl(void);
86471 +
86472 +umode_t gr_acl_umask(void);
86473 +
86474 +int gr_tpe_allow(const struct file *file);
86475 +
86476 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
86477 +void gr_clear_chroot_entries(struct task_struct *task);
86478 +
86479 +void gr_log_forkfail(const int retval);
86480 +void gr_log_timechange(void);
86481 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
86482 +void gr_log_chdir(const struct dentry *dentry,
86483 + const struct vfsmount *mnt);
86484 +void gr_log_chroot_exec(const struct dentry *dentry,
86485 + const struct vfsmount *mnt);
86486 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
86487 +#ifdef CONFIG_COMPAT
86488 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
86489 +#endif
86490 +void gr_log_remount(const char *devname, const int retval);
86491 +void gr_log_unmount(const char *devname, const int retval);
86492 +void gr_log_mount(const char *from, const char *to, const int retval);
86493 +void gr_log_textrel(struct vm_area_struct *vma);
86494 +void gr_log_rwxmmap(struct file *file);
86495 +void gr_log_rwxmprotect(struct file *file);
86496 +
86497 +int gr_handle_follow_link(const struct inode *parent,
86498 + const struct inode *inode,
86499 + const struct dentry *dentry,
86500 + const struct vfsmount *mnt);
86501 +int gr_handle_fifo(const struct dentry *dentry,
86502 + const struct vfsmount *mnt,
86503 + const struct dentry *dir, const int flag,
86504 + const int acc_mode);
86505 +int gr_handle_hardlink(const struct dentry *dentry,
86506 + const struct vfsmount *mnt,
86507 + struct inode *inode,
86508 + const int mode, const char *to);
86509 +
86510 +int gr_is_capable(const int cap);
86511 +int gr_is_capable_nolog(const int cap);
86512 +void gr_learn_resource(const struct task_struct *task, const int limit,
86513 + const unsigned long wanted, const int gt);
86514 +void gr_copy_label(struct task_struct *tsk);
86515 +void gr_handle_crash(struct task_struct *task, const int sig);
86516 +int gr_handle_signal(const struct task_struct *p, const int sig);
86517 +int gr_check_crash_uid(const uid_t uid);
86518 +int gr_check_protected_task(const struct task_struct *task);
86519 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
86520 +int gr_acl_handle_mmap(const struct file *file,
86521 + const unsigned long prot);
86522 +int gr_acl_handle_mprotect(const struct file *file,
86523 + const unsigned long prot);
86524 +int gr_check_hidden_task(const struct task_struct *tsk);
86525 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
86526 + const struct vfsmount *mnt);
86527 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
86528 + const struct vfsmount *mnt);
86529 +__u32 gr_acl_handle_access(const struct dentry *dentry,
86530 + const struct vfsmount *mnt, const int fmode);
86531 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
86532 + const struct vfsmount *mnt, umode_t *mode);
86533 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
86534 + const struct vfsmount *mnt);
86535 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
86536 + const struct vfsmount *mnt);
86537 +int gr_handle_ptrace(struct task_struct *task, const long request);
86538 +int gr_handle_proc_ptrace(struct task_struct *task);
86539 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
86540 + const struct vfsmount *mnt);
86541 +int gr_check_crash_exec(const struct file *filp);
86542 +int gr_acl_is_enabled(void);
86543 +void gr_set_kernel_label(struct task_struct *task);
86544 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
86545 + const gid_t gid);
86546 +int gr_set_proc_label(const struct dentry *dentry,
86547 + const struct vfsmount *mnt,
86548 + const int unsafe_flags);
86549 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
86550 + const struct vfsmount *mnt);
86551 +__u32 gr_acl_handle_open(const struct dentry *dentry,
86552 + const struct vfsmount *mnt, int acc_mode);
86553 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
86554 + const struct dentry *p_dentry,
86555 + const struct vfsmount *p_mnt,
86556 + int open_flags, int acc_mode, const int imode);
86557 +void gr_handle_create(const struct dentry *dentry,
86558 + const struct vfsmount *mnt);
86559 +void gr_handle_proc_create(const struct dentry *dentry,
86560 + const struct inode *inode);
86561 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
86562 + const struct dentry *parent_dentry,
86563 + const struct vfsmount *parent_mnt,
86564 + const int mode);
86565 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
86566 + const struct dentry *parent_dentry,
86567 + const struct vfsmount *parent_mnt);
86568 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
86569 + const struct vfsmount *mnt);
86570 +void gr_handle_delete(const ino_t ino, const dev_t dev);
86571 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
86572 + const struct vfsmount *mnt);
86573 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
86574 + const struct dentry *parent_dentry,
86575 + const struct vfsmount *parent_mnt,
86576 + const char *from);
86577 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
86578 + const struct dentry *parent_dentry,
86579 + const struct vfsmount *parent_mnt,
86580 + const struct dentry *old_dentry,
86581 + const struct vfsmount *old_mnt, const char *to);
86582 +int gr_acl_handle_rename(struct dentry *new_dentry,
86583 + struct dentry *parent_dentry,
86584 + const struct vfsmount *parent_mnt,
86585 + struct dentry *old_dentry,
86586 + struct inode *old_parent_inode,
86587 + struct vfsmount *old_mnt, const char *newname);
86588 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
86589 + struct dentry *old_dentry,
86590 + struct dentry *new_dentry,
86591 + struct vfsmount *mnt, const __u8 replace);
86592 +__u32 gr_check_link(const struct dentry *new_dentry,
86593 + const struct dentry *parent_dentry,
86594 + const struct vfsmount *parent_mnt,
86595 + const struct dentry *old_dentry,
86596 + const struct vfsmount *old_mnt);
86597 +int gr_acl_handle_filldir(const struct file *file, const char *name,
86598 + const unsigned int namelen, const ino_t ino);
86599 +
86600 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
86601 + const struct vfsmount *mnt);
86602 +void gr_acl_handle_exit(void);
86603 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
86604 +int gr_acl_handle_procpidmem(const struct task_struct *task);
86605 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
86606 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
86607 +void gr_audit_ptrace(struct task_struct *task);
86608 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
86609 +
86610 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
86611 +
86612 +#ifdef CONFIG_GRKERNSEC
86613 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
86614 +void gr_handle_vm86(void);
86615 +void gr_handle_mem_readwrite(u64 from, u64 to);
86616 +
86617 +void gr_log_badprocpid(const char *entry);
86618 +
86619 +extern int grsec_enable_dmesg;
86620 +extern int grsec_disable_privio;
86621 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
86622 +extern int grsec_enable_chroot_findtask;
86623 +#endif
86624 +#ifdef CONFIG_GRKERNSEC_SETXID
86625 +extern int grsec_enable_setxid;
86626 +#endif
86627 +#endif
86628 +
86629 +#endif
86630 diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
86631 index 6a87154..a3ce57b 100644
86632 --- a/include/linux/hdpu_features.h
86633 +++ b/include/linux/hdpu_features.h
86634 @@ -3,7 +3,7 @@
86635 struct cpustate_t {
86636 spinlock_t lock;
86637 int excl;
86638 - int open_count;
86639 + atomic_t open_count;
86640 unsigned char cached_val;
86641 int inited;
86642 unsigned long *set_addr;
86643 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
86644 index 211ff44..00ab6d7 100644
86645 --- a/include/linux/highmem.h
86646 +++ b/include/linux/highmem.h
86647 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
86648 kunmap_atomic(kaddr, KM_USER0);
86649 }
86650
86651 +static inline void sanitize_highpage(struct page *page)
86652 +{
86653 + void *kaddr;
86654 + unsigned long flags;
86655 +
86656 + local_irq_save(flags);
86657 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
86658 + clear_page(kaddr);
86659 + kunmap_atomic(kaddr, KM_CLEARPAGE);
86660 + local_irq_restore(flags);
86661 +}
86662 +
86663 static inline void zero_user_segments(struct page *page,
86664 unsigned start1, unsigned end1,
86665 unsigned start2, unsigned end2)
86666 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
86667 index 7b40cda..24eb44e 100644
86668 --- a/include/linux/i2c.h
86669 +++ b/include/linux/i2c.h
86670 @@ -325,6 +325,7 @@ struct i2c_algorithm {
86671 /* To determine what the adapter supports */
86672 u32 (*functionality) (struct i2c_adapter *);
86673 };
86674 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
86675
86676 /*
86677 * i2c_adapter is the structure used to identify a physical i2c bus along
86678 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
86679 index 4c4e57d..f3c5303 100644
86680 --- a/include/linux/i2o.h
86681 +++ b/include/linux/i2o.h
86682 @@ -564,7 +564,7 @@ struct i2o_controller {
86683 struct i2o_device *exec; /* Executive */
86684 #if BITS_PER_LONG == 64
86685 spinlock_t context_list_lock; /* lock for context_list */
86686 - atomic_t context_list_counter; /* needed for unique contexts */
86687 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
86688 struct list_head context_list; /* list of context id's
86689 and pointers */
86690 #endif
86691 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
86692 index 21a6f5d..dc42eab 100644
86693 --- a/include/linux/init_task.h
86694 +++ b/include/linux/init_task.h
86695 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
86696 #define INIT_IDS
86697 #endif
86698
86699 +#ifdef CONFIG_X86
86700 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
86701 +#else
86702 +#define INIT_TASK_THREAD_INFO
86703 +#endif
86704 +
86705 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
86706 /*
86707 * Because of the reduced scope of CAP_SETPCAP when filesystem
86708 @@ -156,6 +162,7 @@ extern struct cred init_cred;
86709 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
86710 .comm = "swapper", \
86711 .thread = INIT_THREAD, \
86712 + INIT_TASK_THREAD_INFO \
86713 .fs = &init_fs, \
86714 .files = &init_files, \
86715 .signal = &init_signals, \
86716 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
86717 index 4f0a72a..a849599 100644
86718 --- a/include/linux/intel-iommu.h
86719 +++ b/include/linux/intel-iommu.h
86720 @@ -296,7 +296,7 @@ struct iommu_flush {
86721 u8 fm, u64 type);
86722 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
86723 unsigned int size_order, u64 type);
86724 -};
86725 +} __no_const;
86726
86727 enum {
86728 SR_DMAR_FECTL_REG,
86729 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
86730 index c739150..be577b5 100644
86731 --- a/include/linux/interrupt.h
86732 +++ b/include/linux/interrupt.h
86733 @@ -369,7 +369,7 @@ enum
86734 /* map softirq index to softirq name. update 'softirq_to_name' in
86735 * kernel/softirq.c when adding a new softirq.
86736 */
86737 -extern char *softirq_to_name[NR_SOFTIRQS];
86738 +extern const char * const softirq_to_name[NR_SOFTIRQS];
86739
86740 /* softirq mask and active fields moved to irq_cpustat_t in
86741 * asm/hardirq.h to get better cache usage. KAO
86742 @@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
86743
86744 struct softirq_action
86745 {
86746 - void (*action)(struct softirq_action *);
86747 + void (*action)(void);
86748 };
86749
86750 asmlinkage void do_softirq(void);
86751 asmlinkage void __do_softirq(void);
86752 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
86753 +extern void open_softirq(int nr, void (*action)(void));
86754 extern void softirq_init(void);
86755 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
86756 extern void raise_softirq_irqoff(unsigned int nr);
86757 diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
86758 index eb73632..19abfc1 100644
86759 --- a/include/linux/iocontext.h
86760 +++ b/include/linux/iocontext.h
86761 @@ -94,14 +94,15 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
86762 return NULL;
86763 }
86764
86765 +struct task_struct;
86766 #ifdef CONFIG_BLOCK
86767 int put_io_context(struct io_context *ioc);
86768 -void exit_io_context(void);
86769 +void exit_io_context(struct task_struct *task);
86770 struct io_context *get_io_context(gfp_t gfp_flags, int node);
86771 struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
86772 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
86773 #else
86774 -static inline void exit_io_context(void)
86775 +static inline void exit_io_context(struct task_struct *task)
86776 {
86777 }
86778
86779 diff --git a/include/linux/irq.h b/include/linux/irq.h
86780 index 9e5f45a..025865b 100644
86781 --- a/include/linux/irq.h
86782 +++ b/include/linux/irq.h
86783 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
86784 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
86785 bool boot)
86786 {
86787 +#ifdef CONFIG_CPUMASK_OFFSTACK
86788 gfp_t gfp = GFP_ATOMIC;
86789
86790 if (boot)
86791 gfp = GFP_NOWAIT;
86792
86793 -#ifdef CONFIG_CPUMASK_OFFSTACK
86794 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
86795 return false;
86796
86797 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
86798 index 7922742..27306a2 100644
86799 --- a/include/linux/kallsyms.h
86800 +++ b/include/linux/kallsyms.h
86801 @@ -15,7 +15,8 @@
86802
86803 struct module;
86804
86805 -#ifdef CONFIG_KALLSYMS
86806 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
86807 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
86808 /* Lookup the address for a symbol. Returns 0 if not found. */
86809 unsigned long kallsyms_lookup_name(const char *name);
86810
86811 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
86812 /* Stupid that this does nothing, but I didn't create this mess. */
86813 #define __print_symbol(fmt, addr)
86814 #endif /*CONFIG_KALLSYMS*/
86815 +#else /* when included by kallsyms.c, vsnprintf.c, or
86816 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
86817 +extern void __print_symbol(const char *fmt, unsigned long address);
86818 +extern int sprint_symbol(char *buffer, unsigned long address);
86819 +const char *kallsyms_lookup(unsigned long addr,
86820 + unsigned long *symbolsize,
86821 + unsigned long *offset,
86822 + char **modname, char *namebuf);
86823 +#endif
86824
86825 /* This macro allows us to keep printk typechecking */
86826 static void __check_printsym_format(const char *fmt, ...)
86827 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
86828 index 6adcc29..13369e8 100644
86829 --- a/include/linux/kgdb.h
86830 +++ b/include/linux/kgdb.h
86831 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
86832
86833 extern int kgdb_connected;
86834
86835 -extern atomic_t kgdb_setting_breakpoint;
86836 -extern atomic_t kgdb_cpu_doing_single_step;
86837 +extern atomic_unchecked_t kgdb_setting_breakpoint;
86838 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
86839
86840 extern struct task_struct *kgdb_usethread;
86841 extern struct task_struct *kgdb_contthread;
86842 @@ -235,7 +235,7 @@ struct kgdb_arch {
86843 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
86844 void (*remove_all_hw_break)(void);
86845 void (*correct_hw_break)(void);
86846 -};
86847 +} __do_const;
86848
86849 /**
86850 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
86851 @@ -257,14 +257,14 @@ struct kgdb_io {
86852 int (*init) (void);
86853 void (*pre_exception) (void);
86854 void (*post_exception) (void);
86855 -};
86856 +} __do_const;
86857
86858 -extern struct kgdb_arch arch_kgdb_ops;
86859 +extern const struct kgdb_arch arch_kgdb_ops;
86860
86861 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
86862
86863 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
86864 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
86865 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
86866 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
86867
86868 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
86869 extern int kgdb_mem2hex(char *mem, char *buf, int count);
86870 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
86871 index 0546fe7..2a22bc1 100644
86872 --- a/include/linux/kmod.h
86873 +++ b/include/linux/kmod.h
86874 @@ -31,6 +31,8 @@
86875 * usually useless though. */
86876 extern int __request_module(bool wait, const char *name, ...) \
86877 __attribute__((format(printf, 2, 3)));
86878 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
86879 + __attribute__((format(printf, 3, 4)));
86880 #define request_module(mod...) __request_module(true, mod)
86881 #define request_module_nowait(mod...) __request_module(false, mod)
86882 #define try_then_request_module(x, mod...) \
86883 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
86884 index 58ae8e0..3950d3c 100644
86885 --- a/include/linux/kobject.h
86886 +++ b/include/linux/kobject.h
86887 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
86888
86889 struct kobj_type {
86890 void (*release)(struct kobject *kobj);
86891 - struct sysfs_ops *sysfs_ops;
86892 + const struct sysfs_ops *sysfs_ops;
86893 struct attribute **default_attrs;
86894 };
86895
86896 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
86897 };
86898
86899 struct kset_uevent_ops {
86900 - int (*filter)(struct kset *kset, struct kobject *kobj);
86901 - const char *(*name)(struct kset *kset, struct kobject *kobj);
86902 - int (*uevent)(struct kset *kset, struct kobject *kobj,
86903 + int (* const filter)(struct kset *kset, struct kobject *kobj);
86904 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
86905 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
86906 struct kobj_uevent_env *env);
86907 };
86908
86909 @@ -132,7 +132,7 @@ struct kobj_attribute {
86910 const char *buf, size_t count);
86911 };
86912
86913 -extern struct sysfs_ops kobj_sysfs_ops;
86914 +extern const struct sysfs_ops kobj_sysfs_ops;
86915
86916 /**
86917 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
86918 @@ -155,14 +155,14 @@ struct kset {
86919 struct list_head list;
86920 spinlock_t list_lock;
86921 struct kobject kobj;
86922 - struct kset_uevent_ops *uevent_ops;
86923 + const struct kset_uevent_ops *uevent_ops;
86924 };
86925
86926 extern void kset_init(struct kset *kset);
86927 extern int __must_check kset_register(struct kset *kset);
86928 extern void kset_unregister(struct kset *kset);
86929 extern struct kset * __must_check kset_create_and_add(const char *name,
86930 - struct kset_uevent_ops *u,
86931 + const struct kset_uevent_ops *u,
86932 struct kobject *parent_kobj);
86933
86934 static inline struct kset *to_kset(struct kobject *kobj)
86935 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
86936 index c728a50..762821f 100644
86937 --- a/include/linux/kvm_host.h
86938 +++ b/include/linux/kvm_host.h
86939 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
86940 void vcpu_load(struct kvm_vcpu *vcpu);
86941 void vcpu_put(struct kvm_vcpu *vcpu);
86942
86943 -int kvm_init(void *opaque, unsigned int vcpu_size,
86944 +int kvm_init(const void *opaque, unsigned int vcpu_size,
86945 struct module *module);
86946 void kvm_exit(void);
86947
86948 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
86949 struct kvm_guest_debug *dbg);
86950 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
86951
86952 -int kvm_arch_init(void *opaque);
86953 +int kvm_arch_init(const void *opaque);
86954 void kvm_arch_exit(void);
86955
86956 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
86957 @@ -519,7 +519,7 @@ int kvm_setup_default_irq_routing(struct kvm *kvm);
86958 int kvm_set_irq_routing(struct kvm *kvm,
86959 const struct kvm_irq_routing_entry *entries,
86960 unsigned nr,
86961 - unsigned flags);
86962 + unsigned flags) __size_overflow(3);
86963 void kvm_free_irq_routing(struct kvm *kvm);
86964
86965 #else
86966 diff --git a/include/linux/libata.h b/include/linux/libata.h
86967 index a069916..223edde 100644
86968 --- a/include/linux/libata.h
86969 +++ b/include/linux/libata.h
86970 @@ -525,11 +525,11 @@ struct ata_ioports {
86971
86972 struct ata_host {
86973 spinlock_t lock;
86974 - struct device *dev;
86975 + struct device *dev;
86976 void __iomem * const *iomap;
86977 unsigned int n_ports;
86978 void *private_data;
86979 - struct ata_port_operations *ops;
86980 + const struct ata_port_operations *ops;
86981 unsigned long flags;
86982 #ifdef CONFIG_ATA_ACPI
86983 acpi_handle acpi_handle;
86984 @@ -710,7 +710,7 @@ struct ata_link {
86985
86986 struct ata_port {
86987 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
86988 - struct ata_port_operations *ops;
86989 + const struct ata_port_operations *ops;
86990 spinlock_t *lock;
86991 /* Flags owned by the EH context. Only EH should touch these once the
86992 port is active */
86993 @@ -884,7 +884,7 @@ struct ata_port_operations {
86994 * fields must be pointers.
86995 */
86996 const struct ata_port_operations *inherits;
86997 -};
86998 +} __do_const;
86999
87000 struct ata_port_info {
87001 unsigned long flags;
87002 @@ -892,7 +892,7 @@ struct ata_port_info {
87003 unsigned long pio_mask;
87004 unsigned long mwdma_mask;
87005 unsigned long udma_mask;
87006 - struct ata_port_operations *port_ops;
87007 + const struct ata_port_operations *port_ops;
87008 void *private_data;
87009 };
87010
87011 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
87012 extern const unsigned long sata_deb_timing_hotplug[];
87013 extern const unsigned long sata_deb_timing_long[];
87014
87015 -extern struct ata_port_operations ata_dummy_port_ops;
87016 +extern const struct ata_port_operations ata_dummy_port_ops;
87017 extern const struct ata_port_info ata_dummy_port_info;
87018
87019 static inline const unsigned long *
87020 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
87021 struct scsi_host_template *sht);
87022 extern void ata_host_detach(struct ata_host *host);
87023 extern void ata_host_init(struct ata_host *, struct device *,
87024 - unsigned long, struct ata_port_operations *);
87025 + unsigned long, const struct ata_port_operations *);
87026 extern int ata_scsi_detect(struct scsi_host_template *sht);
87027 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
87028 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
87029 diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
87030 index fbc48f8..0886e57 100644
87031 --- a/include/linux/lockd/bind.h
87032 +++ b/include/linux/lockd/bind.h
87033 @@ -23,13 +23,13 @@ struct svc_rqst;
87034 * This is the set of functions for lockd->nfsd communication
87035 */
87036 struct nlmsvc_binding {
87037 - __be32 (*fopen)(struct svc_rqst *,
87038 + __be32 (* const fopen)(struct svc_rqst *,
87039 struct nfs_fh *,
87040 struct file **);
87041 - void (*fclose)(struct file *);
87042 + void (* const fclose)(struct file *);
87043 };
87044
87045 -extern struct nlmsvc_binding * nlmsvc_ops;
87046 +extern const struct nlmsvc_binding * nlmsvc_ops;
87047
87048 /*
87049 * Similar to nfs_client_initdata, but without the NFS-specific
87050 diff --git a/include/linux/mca.h b/include/linux/mca.h
87051 index 3797270..7765ede 100644
87052 --- a/include/linux/mca.h
87053 +++ b/include/linux/mca.h
87054 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
87055 int region);
87056 void * (*mca_transform_memory)(struct mca_device *,
87057 void *memory);
87058 -};
87059 +} __no_const;
87060
87061 struct mca_bus {
87062 u64 default_dma_mask;
87063 diff --git a/include/linux/memory.h b/include/linux/memory.h
87064 index 37fa19b..b597c85 100644
87065 --- a/include/linux/memory.h
87066 +++ b/include/linux/memory.h
87067 @@ -108,7 +108,7 @@ struct memory_accessor {
87068 size_t count);
87069 ssize_t (*write)(struct memory_accessor *, const char *buf,
87070 off_t offset, size_t count);
87071 -};
87072 +} __no_const;
87073
87074 /*
87075 * Kernel text modification mutex, used for code patching. Users of this lock
87076 diff --git a/include/linux/mm.h b/include/linux/mm.h
87077 index 11e5be6..1ff2423 100644
87078 --- a/include/linux/mm.h
87079 +++ b/include/linux/mm.h
87080 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
87081
87082 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
87083 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
87084 +
87085 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
87086 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
87087 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
87088 +#else
87089 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
87090 +#endif
87091 +
87092 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
87093 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
87094
87095 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
87096 int set_page_dirty_lock(struct page *page);
87097 int clear_page_dirty_for_io(struct page *page);
87098
87099 -/* Is the vma a continuation of the stack vma above it? */
87100 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
87101 -{
87102 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
87103 -}
87104 -
87105 extern unsigned long move_page_tables(struct vm_area_struct *vma,
87106 unsigned long old_addr, struct vm_area_struct *new_vma,
87107 unsigned long new_addr, unsigned long len);
87108 @@ -890,6 +891,8 @@ struct shrinker {
87109 extern void register_shrinker(struct shrinker *);
87110 extern void unregister_shrinker(struct shrinker *);
87111
87112 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
87113 +
87114 int vma_wants_writenotify(struct vm_area_struct *vma);
87115
87116 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
87117 @@ -1162,6 +1165,7 @@ out:
87118 }
87119
87120 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
87121 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
87122
87123 extern unsigned long do_brk(unsigned long, unsigned long);
87124
87125 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
87126 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
87127 struct vm_area_struct **pprev);
87128
87129 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
87130 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
87131 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
87132 +
87133 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
87134 NULL if none. Assume start_addr < end_addr. */
87135 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
87136 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
87137 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
87138 }
87139
87140 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
87141 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
87142 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
87143 unsigned long pfn, unsigned long size, pgprot_t);
87144 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
87145 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
87146 extern int sysctl_memory_failure_early_kill;
87147 extern int sysctl_memory_failure_recovery;
87148 -extern atomic_long_t mce_bad_pages;
87149 +extern atomic_long_unchecked_t mce_bad_pages;
87150 +
87151 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
87152 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
87153 +#else
87154 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
87155 +#endif
87156
87157 #endif /* __KERNEL__ */
87158 #endif /* _LINUX_MM_H */
87159 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
87160 index 9d12ed5..6d9707a 100644
87161 --- a/include/linux/mm_types.h
87162 +++ b/include/linux/mm_types.h
87163 @@ -186,6 +186,8 @@ struct vm_area_struct {
87164 #ifdef CONFIG_NUMA
87165 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
87166 #endif
87167 +
87168 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
87169 };
87170
87171 struct core_thread {
87172 @@ -287,6 +289,24 @@ struct mm_struct {
87173 #ifdef CONFIG_MMU_NOTIFIER
87174 struct mmu_notifier_mm *mmu_notifier_mm;
87175 #endif
87176 +
87177 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
87178 + unsigned long pax_flags;
87179 +#endif
87180 +
87181 +#ifdef CONFIG_PAX_DLRESOLVE
87182 + unsigned long call_dl_resolve;
87183 +#endif
87184 +
87185 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
87186 + unsigned long call_syscall;
87187 +#endif
87188 +
87189 +#ifdef CONFIG_PAX_ASLR
87190 + unsigned long delta_mmap; /* randomized offset */
87191 + unsigned long delta_stack; /* randomized offset */
87192 +#endif
87193 +
87194 };
87195
87196 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
87197 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
87198 index 4e02ee2..afb159e 100644
87199 --- a/include/linux/mmu_notifier.h
87200 +++ b/include/linux/mmu_notifier.h
87201 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
87202 */
87203 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
87204 ({ \
87205 - pte_t __pte; \
87206 + pte_t ___pte; \
87207 struct vm_area_struct *___vma = __vma; \
87208 unsigned long ___address = __address; \
87209 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
87210 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
87211 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
87212 - __pte; \
87213 + ___pte; \
87214 })
87215
87216 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
87217 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
87218 index 6c31a2a..4b0e930 100644
87219 --- a/include/linux/mmzone.h
87220 +++ b/include/linux/mmzone.h
87221 @@ -350,7 +350,7 @@ struct zone {
87222 unsigned long flags; /* zone flags, see below */
87223
87224 /* Zone statistics */
87225 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87226 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87227
87228 /*
87229 * prev_priority holds the scanning priority for this zone. It is
87230 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
87231 index f58e9d8..3503935 100644
87232 --- a/include/linux/mod_devicetable.h
87233 +++ b/include/linux/mod_devicetable.h
87234 @@ -12,7 +12,7 @@
87235 typedef unsigned long kernel_ulong_t;
87236 #endif
87237
87238 -#define PCI_ANY_ID (~0)
87239 +#define PCI_ANY_ID ((__u16)~0)
87240
87241 struct pci_device_id {
87242 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
87243 @@ -131,7 +131,7 @@ struct usb_device_id {
87244 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
87245 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
87246
87247 -#define HID_ANY_ID (~0)
87248 +#define HID_ANY_ID (~0U)
87249
87250 struct hid_device_id {
87251 __u16 bus;
87252 diff --git a/include/linux/module.h b/include/linux/module.h
87253 index 482efc8..642032b 100644
87254 --- a/include/linux/module.h
87255 +++ b/include/linux/module.h
87256 @@ -16,6 +16,7 @@
87257 #include <linux/kobject.h>
87258 #include <linux/moduleparam.h>
87259 #include <linux/tracepoint.h>
87260 +#include <linux/fs.h>
87261
87262 #include <asm/local.h>
87263 #include <asm/module.h>
87264 @@ -287,16 +288,16 @@ struct module
87265 int (*init)(void);
87266
87267 /* If this is non-NULL, vfree after init() returns */
87268 - void *module_init;
87269 + void *module_init_rx, *module_init_rw;
87270
87271 /* Here is the actual code + data, vfree'd on unload. */
87272 - void *module_core;
87273 + void *module_core_rx, *module_core_rw;
87274
87275 /* Here are the sizes of the init and core sections */
87276 - unsigned int init_size, core_size;
87277 + unsigned int init_size_rw, core_size_rw;
87278
87279 /* The size of the executable code in each section. */
87280 - unsigned int init_text_size, core_text_size;
87281 + unsigned int init_size_rx, core_size_rx;
87282
87283 /* Arch-specific module values */
87284 struct mod_arch_specific arch;
87285 @@ -345,6 +346,10 @@ struct module
87286 #ifdef CONFIG_EVENT_TRACING
87287 struct ftrace_event_call *trace_events;
87288 unsigned int num_trace_events;
87289 + struct file_operations trace_id;
87290 + struct file_operations trace_enable;
87291 + struct file_operations trace_format;
87292 + struct file_operations trace_filter;
87293 #endif
87294 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
87295 unsigned long *ftrace_callsites;
87296 @@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
87297 bool is_module_address(unsigned long addr);
87298 bool is_module_text_address(unsigned long addr);
87299
87300 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
87301 +{
87302 +
87303 +#ifdef CONFIG_PAX_KERNEXEC
87304 + if (ktla_ktva(addr) >= (unsigned long)start &&
87305 + ktla_ktva(addr) < (unsigned long)start + size)
87306 + return 1;
87307 +#endif
87308 +
87309 + return ((void *)addr >= start && (void *)addr < start + size);
87310 +}
87311 +
87312 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
87313 +{
87314 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
87315 +}
87316 +
87317 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
87318 +{
87319 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
87320 +}
87321 +
87322 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
87323 +{
87324 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
87325 +}
87326 +
87327 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
87328 +{
87329 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
87330 +}
87331 +
87332 static inline int within_module_core(unsigned long addr, struct module *mod)
87333 {
87334 - return (unsigned long)mod->module_core <= addr &&
87335 - addr < (unsigned long)mod->module_core + mod->core_size;
87336 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
87337 }
87338
87339 static inline int within_module_init(unsigned long addr, struct module *mod)
87340 {
87341 - return (unsigned long)mod->module_init <= addr &&
87342 - addr < (unsigned long)mod->module_init + mod->init_size;
87343 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
87344 }
87345
87346 /* Search for module by name: must hold module_mutex. */
87347 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
87348 index c1f40c2..e875ff4 100644
87349 --- a/include/linux/moduleloader.h
87350 +++ b/include/linux/moduleloader.h
87351 @@ -18,11 +18,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
87352
87353 /* Allocator used for allocating struct module, core sections and init
87354 sections. Returns NULL on failure. */
87355 -void *module_alloc(unsigned long size);
87356 +void *module_alloc(unsigned long size) __size_overflow(1);
87357 +
87358 +#ifdef CONFIG_PAX_KERNEXEC
87359 +void *module_alloc_exec(unsigned long size);
87360 +#else
87361 +#define module_alloc_exec(x) module_alloc(x)
87362 +#endif
87363
87364 /* Free memory returned from module_alloc. */
87365 void module_free(struct module *mod, void *module_region);
87366
87367 +#ifdef CONFIG_PAX_KERNEXEC
87368 +void module_free_exec(struct module *mod, void *module_region);
87369 +#else
87370 +#define module_free_exec(x, y) module_free((x), (y))
87371 +#endif
87372 +
87373 /* Apply the given relocation to the (simplified) ELF. Return -error
87374 or 0. */
87375 int apply_relocate(Elf_Shdr *sechdrs,
87376 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
87377 index 82a9124..8a5f622 100644
87378 --- a/include/linux/moduleparam.h
87379 +++ b/include/linux/moduleparam.h
87380 @@ -132,7 +132,7 @@ struct kparam_array
87381
87382 /* Actually copy string: maxlen param is usually sizeof(string). */
87383 #define module_param_string(name, string, len, perm) \
87384 - static const struct kparam_string __param_string_##name \
87385 + static const struct kparam_string __param_string_##name __used \
87386 = { len, string }; \
87387 __module_param_call(MODULE_PARAM_PREFIX, name, \
87388 param_set_copystring, param_get_string, \
87389 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
87390
87391 /* Comma-separated array: *nump is set to number they actually specified. */
87392 #define module_param_array_named(name, array, type, nump, perm) \
87393 - static const struct kparam_array __param_arr_##name \
87394 + static const struct kparam_array __param_arr_##name __used \
87395 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
87396 sizeof(array[0]), array }; \
87397 __module_param_call(MODULE_PARAM_PREFIX, name, \
87398 diff --git a/include/linux/mutex.h b/include/linux/mutex.h
87399 index 878cab4..c92cb3e 100644
87400 --- a/include/linux/mutex.h
87401 +++ b/include/linux/mutex.h
87402 @@ -51,7 +51,7 @@ struct mutex {
87403 spinlock_t wait_lock;
87404 struct list_head wait_list;
87405 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
87406 - struct thread_info *owner;
87407 + struct task_struct *owner;
87408 #endif
87409 #ifdef CONFIG_DEBUG_MUTEXES
87410 const char *name;
87411 diff --git a/include/linux/namei.h b/include/linux/namei.h
87412 index ec0f607..d19e675 100644
87413 --- a/include/linux/namei.h
87414 +++ b/include/linux/namei.h
87415 @@ -22,7 +22,7 @@ struct nameidata {
87416 unsigned int flags;
87417 int last_type;
87418 unsigned depth;
87419 - char *saved_names[MAX_NESTED_LINKS + 1];
87420 + const char *saved_names[MAX_NESTED_LINKS + 1];
87421
87422 /* Intent data */
87423 union {
87424 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
87425 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
87426 extern void unlock_rename(struct dentry *, struct dentry *);
87427
87428 -static inline void nd_set_link(struct nameidata *nd, char *path)
87429 +static inline void nd_set_link(struct nameidata *nd, const char *path)
87430 {
87431 nd->saved_names[nd->depth] = path;
87432 }
87433
87434 -static inline char *nd_get_link(struct nameidata *nd)
87435 +static inline const char *nd_get_link(const struct nameidata *nd)
87436 {
87437 return nd->saved_names[nd->depth];
87438 }
87439 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
87440 index 9d7e8f7..04428c5 100644
87441 --- a/include/linux/netdevice.h
87442 +++ b/include/linux/netdevice.h
87443 @@ -637,6 +637,7 @@ struct net_device_ops {
87444 u16 xid);
87445 #endif
87446 };
87447 +typedef struct net_device_ops __no_const net_device_ops_no_const;
87448
87449 /*
87450 * The DEVICE structure.
87451 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
87452 new file mode 100644
87453 index 0000000..33f4af8
87454 --- /dev/null
87455 +++ b/include/linux/netfilter/xt_gradm.h
87456 @@ -0,0 +1,9 @@
87457 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
87458 +#define _LINUX_NETFILTER_XT_GRADM_H 1
87459 +
87460 +struct xt_gradm_mtinfo {
87461 + __u16 flags;
87462 + __u16 invflags;
87463 +};
87464 +
87465 +#endif
87466 diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
87467 index b359c4a..c08b334 100644
87468 --- a/include/linux/nodemask.h
87469 +++ b/include/linux/nodemask.h
87470 @@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
87471
87472 #define any_online_node(mask) \
87473 ({ \
87474 - int node; \
87475 - for_each_node_mask(node, (mask)) \
87476 - if (node_online(node)) \
87477 + int __node; \
87478 + for_each_node_mask(__node, (mask)) \
87479 + if (node_online(__node)) \
87480 break; \
87481 - node; \
87482 + __node; \
87483 })
87484
87485 #define num_online_nodes() num_node_state(N_ONLINE)
87486 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
87487 index 5171639..81f30d3 100644
87488 --- a/include/linux/oprofile.h
87489 +++ b/include/linux/oprofile.h
87490 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
87491 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
87492 char const * name, ulong * val);
87493
87494 -/** Create a file for read-only access to an atomic_t. */
87495 +/** Create a file for read-only access to an atomic_unchecked_t. */
87496 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
87497 - char const * name, atomic_t * val);
87498 + char const * name, atomic_unchecked_t * val);
87499
87500 /** create a directory */
87501 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
87502 @@ -153,7 +153,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
87503 * Read an ASCII string for a number from a userspace buffer and fill *val on success.
87504 * Returns 0 on success, < 0 on error.
87505 */
87506 -int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
87507 +int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count) __size_overflow(3);
87508
87509 /** lock for read/write safety */
87510 extern spinlock_t oprofilefs_lock;
87511 diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
87512 index 3c62ed4..8924c7c 100644
87513 --- a/include/linux/pagemap.h
87514 +++ b/include/linux/pagemap.h
87515 @@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
87516 if (((unsigned long)uaddr & PAGE_MASK) !=
87517 ((unsigned long)end & PAGE_MASK))
87518 ret = __get_user(c, end);
87519 + (void)c;
87520 }
87521 + (void)c;
87522 return ret;
87523 }
87524
87525 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
87526 index 81c9689..a567a55 100644
87527 --- a/include/linux/perf_event.h
87528 +++ b/include/linux/perf_event.h
87529 @@ -476,7 +476,7 @@ struct hw_perf_event {
87530 struct hrtimer hrtimer;
87531 };
87532 };
87533 - atomic64_t prev_count;
87534 + atomic64_unchecked_t prev_count;
87535 u64 sample_period;
87536 u64 last_period;
87537 atomic64_t period_left;
87538 @@ -557,7 +557,7 @@ struct perf_event {
87539 const struct pmu *pmu;
87540
87541 enum perf_event_active_state state;
87542 - atomic64_t count;
87543 + atomic64_unchecked_t count;
87544
87545 /*
87546 * These are the total time in nanoseconds that the event
87547 @@ -595,8 +595,8 @@ struct perf_event {
87548 * These accumulate total time (in nanoseconds) that children
87549 * events have been enabled and running, respectively.
87550 */
87551 - atomic64_t child_total_time_enabled;
87552 - atomic64_t child_total_time_running;
87553 + atomic64_unchecked_t child_total_time_enabled;
87554 + atomic64_unchecked_t child_total_time_running;
87555
87556 /*
87557 * Protect attach/detach and child_list:
87558 diff --git a/include/linux/personality.h b/include/linux/personality.h
87559 index 1261208..ddef96f 100644
87560 --- a/include/linux/personality.h
87561 +++ b/include/linux/personality.h
87562 @@ -43,6 +43,7 @@ enum {
87563 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
87564 ADDR_NO_RANDOMIZE | \
87565 ADDR_COMPAT_LAYOUT | \
87566 + ADDR_LIMIT_3GB | \
87567 MMAP_PAGE_ZERO)
87568
87569 /*
87570 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
87571 index b43a9e0..b77d869 100644
87572 --- a/include/linux/pipe_fs_i.h
87573 +++ b/include/linux/pipe_fs_i.h
87574 @@ -46,9 +46,9 @@ struct pipe_inode_info {
87575 wait_queue_head_t wait;
87576 unsigned int nrbufs, curbuf;
87577 struct page *tmp_page;
87578 - unsigned int readers;
87579 - unsigned int writers;
87580 - unsigned int waiting_writers;
87581 + atomic_t readers;
87582 + atomic_t writers;
87583 + atomic_t waiting_writers;
87584 unsigned int r_counter;
87585 unsigned int w_counter;
87586 struct fasync_struct *fasync_readers;
87587 diff --git a/include/linux/poison.h b/include/linux/poison.h
87588 index 34066ff..e95d744 100644
87589 --- a/include/linux/poison.h
87590 +++ b/include/linux/poison.h
87591 @@ -19,8 +19,8 @@
87592 * under normal circumstances, used to verify that nobody uses
87593 * non-initialized list entries.
87594 */
87595 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
87596 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
87597 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
87598 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
87599
87600 /********** include/linux/timer.h **********/
87601 /*
87602 diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
87603 index 4f71bf4..cd2f68e 100644
87604 --- a/include/linux/posix-timers.h
87605 +++ b/include/linux/posix-timers.h
87606 @@ -82,7 +82,8 @@ struct k_clock {
87607 #define TIMER_RETRY 1
87608 void (*timer_get) (struct k_itimer * timr,
87609 struct itimerspec * cur_setting);
87610 -};
87611 +} __do_const;
87612 +typedef struct k_clock __no_const k_clock_no_const;
87613
87614 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock);
87615
87616 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
87617 index 72b1a10..13303a9 100644
87618 --- a/include/linux/preempt.h
87619 +++ b/include/linux/preempt.h
87620 @@ -110,7 +110,7 @@ struct preempt_ops {
87621 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
87622 void (*sched_out)(struct preempt_notifier *notifier,
87623 struct task_struct *next);
87624 -};
87625 +} __no_const;
87626
87627 /**
87628 * preempt_notifier - key for installing preemption notifiers
87629 diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
87630 index af7c36a..a93005c 100644
87631 --- a/include/linux/prefetch.h
87632 +++ b/include/linux/prefetch.h
87633 @@ -11,6 +11,7 @@
87634 #define _LINUX_PREFETCH_H
87635
87636 #include <linux/types.h>
87637 +#include <linux/const.h>
87638 #include <asm/processor.h>
87639 #include <asm/cache.h>
87640
87641 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
87642 index 379eaed..1bf73e3 100644
87643 --- a/include/linux/proc_fs.h
87644 +++ b/include/linux/proc_fs.h
87645 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
87646 return proc_create_data(name, mode, parent, proc_fops, NULL);
87647 }
87648
87649 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
87650 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
87651 +{
87652 +#ifdef CONFIG_GRKERNSEC_PROC_USER
87653 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
87654 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
87655 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
87656 +#else
87657 + return proc_create_data(name, mode, parent, proc_fops, NULL);
87658 +#endif
87659 +}
87660 +
87661 +
87662 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
87663 mode_t mode, struct proc_dir_entry *base,
87664 read_proc_t *read_proc, void * data)
87665 @@ -256,7 +269,7 @@ union proc_op {
87666 int (*proc_show)(struct seq_file *m,
87667 struct pid_namespace *ns, struct pid *pid,
87668 struct task_struct *task);
87669 -};
87670 +} __no_const;
87671
87672 struct ctl_table_header;
87673 struct ctl_table;
87674 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
87675 index 7456d7d..6c1cfc9 100644
87676 --- a/include/linux/ptrace.h
87677 +++ b/include/linux/ptrace.h
87678 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
87679 extern void exit_ptrace(struct task_struct *tracer);
87680 #define PTRACE_MODE_READ 1
87681 #define PTRACE_MODE_ATTACH 2
87682 -/* Returns 0 on success, -errno on denial. */
87683 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
87684 /* Returns true on success, false on denial. */
87685 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
87686 +/* Returns true on success, false on denial. */
87687 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
87688
87689 static inline int ptrace_reparented(struct task_struct *child)
87690 {
87691 diff --git a/include/linux/random.h b/include/linux/random.h
87692 index 2948046..3262567 100644
87693 --- a/include/linux/random.h
87694 +++ b/include/linux/random.h
87695 @@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
87696 u32 random32(void);
87697 void srandom32(u32 seed);
87698
87699 +static inline unsigned long pax_get_random_long(void)
87700 +{
87701 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
87702 +}
87703 +
87704 #endif /* __KERNEL___ */
87705
87706 #endif /* _LINUX_RANDOM_H */
87707 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
87708 index 988e55f..17cb4ef 100644
87709 --- a/include/linux/reboot.h
87710 +++ b/include/linux/reboot.h
87711 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
87712 * Architecture-specific implementations of sys_reboot commands.
87713 */
87714
87715 -extern void machine_restart(char *cmd);
87716 -extern void machine_halt(void);
87717 -extern void machine_power_off(void);
87718 +extern void machine_restart(char *cmd) __noreturn;
87719 +extern void machine_halt(void) __noreturn;
87720 +extern void machine_power_off(void) __noreturn;
87721
87722 extern void machine_shutdown(void);
87723 struct pt_regs;
87724 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
87725 */
87726
87727 extern void kernel_restart_prepare(char *cmd);
87728 -extern void kernel_restart(char *cmd);
87729 -extern void kernel_halt(void);
87730 -extern void kernel_power_off(void);
87731 +extern void kernel_restart(char *cmd) __noreturn;
87732 +extern void kernel_halt(void) __noreturn;
87733 +extern void kernel_power_off(void) __noreturn;
87734
87735 void ctrl_alt_del(void);
87736
87737 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
87738 * Emergency restart, callable from an interrupt handler.
87739 */
87740
87741 -extern void emergency_restart(void);
87742 +extern void emergency_restart(void) __noreturn;
87743 #include <asm/emergency-restart.h>
87744
87745 #endif
87746 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
87747 index dd31e7b..5b03c5c 100644
87748 --- a/include/linux/reiserfs_fs.h
87749 +++ b/include/linux/reiserfs_fs.h
87750 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
87751 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
87752
87753 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
87754 -#define get_generation(s) atomic_read (&fs_generation(s))
87755 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
87756 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
87757 #define __fs_changed(gen,s) (gen != get_generation (s))
87758 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
87759 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
87760 */
87761
87762 struct item_operations {
87763 - int (*bytes_number) (struct item_head * ih, int block_size);
87764 - void (*decrement_key) (struct cpu_key *);
87765 - int (*is_left_mergeable) (struct reiserfs_key * ih,
87766 + int (* const bytes_number) (struct item_head * ih, int block_size);
87767 + void (* const decrement_key) (struct cpu_key *);
87768 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
87769 unsigned long bsize);
87770 - void (*print_item) (struct item_head *, char *item);
87771 - void (*check_item) (struct item_head *, char *item);
87772 + void (* const print_item) (struct item_head *, char *item);
87773 + void (* const check_item) (struct item_head *, char *item);
87774
87775 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
87776 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
87777 int is_affected, int insert_size);
87778 - int (*check_left) (struct virtual_item * vi, int free,
87779 + int (* const check_left) (struct virtual_item * vi, int free,
87780 int start_skip, int end_skip);
87781 - int (*check_right) (struct virtual_item * vi, int free);
87782 - int (*part_size) (struct virtual_item * vi, int from, int to);
87783 - int (*unit_num) (struct virtual_item * vi);
87784 - void (*print_vi) (struct virtual_item * vi);
87785 + int (* const check_right) (struct virtual_item * vi, int free);
87786 + int (* const part_size) (struct virtual_item * vi, int from, int to);
87787 + int (* const unit_num) (struct virtual_item * vi);
87788 + void (* const print_vi) (struct virtual_item * vi);
87789 };
87790
87791 -extern struct item_operations *item_ops[TYPE_ANY + 1];
87792 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
87793
87794 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
87795 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
87796 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
87797 index dab68bb..0688727 100644
87798 --- a/include/linux/reiserfs_fs_sb.h
87799 +++ b/include/linux/reiserfs_fs_sb.h
87800 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
87801 /* Comment? -Hans */
87802 wait_queue_head_t s_wait;
87803 /* To be obsoleted soon by per buffer seals.. -Hans */
87804 - atomic_t s_generation_counter; // increased by one every time the
87805 + atomic_unchecked_t s_generation_counter; // increased by one every time the
87806 // tree gets re-balanced
87807 unsigned long s_properties; /* File system properties. Currently holds
87808 on-disk FS format */
87809 diff --git a/include/linux/relay.h b/include/linux/relay.h
87810 index 14a86bc..17d0700 100644
87811 --- a/include/linux/relay.h
87812 +++ b/include/linux/relay.h
87813 @@ -159,7 +159,7 @@ struct rchan_callbacks
87814 * The callback should return 0 if successful, negative if not.
87815 */
87816 int (*remove_buf_file)(struct dentry *dentry);
87817 -};
87818 +} __no_const;
87819
87820 /*
87821 * CONFIG_RELAY kernel API, kernel/relay.c
87822 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
87823 index 3392c59..a746428 100644
87824 --- a/include/linux/rfkill.h
87825 +++ b/include/linux/rfkill.h
87826 @@ -144,6 +144,7 @@ struct rfkill_ops {
87827 void (*query)(struct rfkill *rfkill, void *data);
87828 int (*set_block)(void *data, bool blocked);
87829 };
87830 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
87831
87832 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
87833 /**
87834 diff --git a/include/linux/sched.h b/include/linux/sched.h
87835 index 71849bf..8cf9dd2 100644
87836 --- a/include/linux/sched.h
87837 +++ b/include/linux/sched.h
87838 @@ -101,6 +101,7 @@ struct bio;
87839 struct fs_struct;
87840 struct bts_context;
87841 struct perf_event_context;
87842 +struct linux_binprm;
87843
87844 /*
87845 * List of flags we want to share for kernel threads,
87846 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
87847 extern signed long schedule_timeout_uninterruptible(signed long timeout);
87848 asmlinkage void __schedule(void);
87849 asmlinkage void schedule(void);
87850 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
87851 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
87852
87853 struct nsproxy;
87854 struct user_namespace;
87855 @@ -371,9 +372,12 @@ struct user_namespace;
87856 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
87857
87858 extern int sysctl_max_map_count;
87859 +extern unsigned long sysctl_heap_stack_gap;
87860
87861 #include <linux/aio.h>
87862
87863 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
87864 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
87865 extern unsigned long
87866 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
87867 unsigned long, unsigned long);
87868 @@ -666,6 +670,16 @@ struct signal_struct {
87869 struct tty_audit_buf *tty_audit_buf;
87870 #endif
87871
87872 +#ifdef CONFIG_GRKERNSEC
87873 + u32 curr_ip;
87874 + u32 saved_ip;
87875 + u32 gr_saddr;
87876 + u32 gr_daddr;
87877 + u16 gr_sport;
87878 + u16 gr_dport;
87879 + u8 used_accept:1;
87880 +#endif
87881 +
87882 int oom_adj; /* OOM kill score adjustment (bit shift) */
87883 };
87884
87885 @@ -723,6 +737,11 @@ struct user_struct {
87886 struct key *session_keyring; /* UID's default session keyring */
87887 #endif
87888
87889 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
87890 + unsigned int banned;
87891 + unsigned long ban_expires;
87892 +#endif
87893 +
87894 /* Hash table maintenance information */
87895 struct hlist_node uidhash_node;
87896 uid_t uid;
87897 @@ -1328,8 +1347,8 @@ struct task_struct {
87898 struct list_head thread_group;
87899
87900 struct completion *vfork_done; /* for vfork() */
87901 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
87902 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
87903 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
87904 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
87905
87906 cputime_t utime, stime, utimescaled, stimescaled;
87907 cputime_t gtime;
87908 @@ -1343,16 +1362,6 @@ struct task_struct {
87909 struct task_cputime cputime_expires;
87910 struct list_head cpu_timers[3];
87911
87912 -/* process credentials */
87913 - const struct cred *real_cred; /* objective and real subjective task
87914 - * credentials (COW) */
87915 - const struct cred *cred; /* effective (overridable) subjective task
87916 - * credentials (COW) */
87917 - struct mutex cred_guard_mutex; /* guard against foreign influences on
87918 - * credential calculations
87919 - * (notably. ptrace) */
87920 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
87921 -
87922 char comm[TASK_COMM_LEN]; /* executable name excluding path
87923 - access with [gs]et_task_comm (which lock
87924 it with task_lock())
87925 @@ -1369,6 +1378,10 @@ struct task_struct {
87926 #endif
87927 /* CPU-specific state of this task */
87928 struct thread_struct thread;
87929 +/* thread_info moved to task_struct */
87930 +#ifdef CONFIG_X86
87931 + struct thread_info tinfo;
87932 +#endif
87933 /* filesystem information */
87934 struct fs_struct *fs;
87935 /* open file information */
87936 @@ -1436,6 +1449,15 @@ struct task_struct {
87937 int hardirq_context;
87938 int softirq_context;
87939 #endif
87940 +
87941 +/* process credentials */
87942 + const struct cred *real_cred; /* objective and real subjective task
87943 + * credentials (COW) */
87944 + struct mutex cred_guard_mutex; /* guard against foreign influences on
87945 + * credential calculations
87946 + * (notably. ptrace) */
87947 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
87948 +
87949 #ifdef CONFIG_LOCKDEP
87950 # define MAX_LOCK_DEPTH 48UL
87951 u64 curr_chain_key;
87952 @@ -1456,6 +1478,9 @@ struct task_struct {
87953
87954 struct backing_dev_info *backing_dev_info;
87955
87956 + const struct cred *cred; /* effective (overridable) subjective task
87957 + * credentials (COW) */
87958 +
87959 struct io_context *io_context;
87960
87961 unsigned long ptrace_message;
87962 @@ -1519,6 +1544,27 @@ struct task_struct {
87963 unsigned long default_timer_slack_ns;
87964
87965 struct list_head *scm_work_list;
87966 +
87967 +#ifdef CONFIG_GRKERNSEC
87968 + /* grsecurity */
87969 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
87970 + u64 exec_id;
87971 +#endif
87972 +#ifdef CONFIG_GRKERNSEC_SETXID
87973 + const struct cred *delayed_cred;
87974 +#endif
87975 + struct dentry *gr_chroot_dentry;
87976 + struct acl_subject_label *acl;
87977 + struct acl_role_label *role;
87978 + struct file *exec_file;
87979 + u16 acl_role_id;
87980 + /* is this the task that authenticated to the special role */
87981 + u8 acl_sp_role;
87982 + u8 is_writable;
87983 + u8 brute;
87984 + u8 gr_is_chrooted;
87985 +#endif
87986 +
87987 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
87988 /* Index of current stored adress in ret_stack */
87989 int curr_ret_stack;
87990 @@ -1542,6 +1588,57 @@ struct task_struct {
87991 #endif /* CONFIG_TRACING */
87992 };
87993
87994 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
87995 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
87996 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
87997 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
87998 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
87999 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
88000 +
88001 +#ifdef CONFIG_PAX_SOFTMODE
88002 +extern int pax_softmode;
88003 +#endif
88004 +
88005 +extern int pax_check_flags(unsigned long *);
88006 +
88007 +/* if tsk != current then task_lock must be held on it */
88008 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
88009 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
88010 +{
88011 + if (likely(tsk->mm))
88012 + return tsk->mm->pax_flags;
88013 + else
88014 + return 0UL;
88015 +}
88016 +
88017 +/* if tsk != current then task_lock must be held on it */
88018 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
88019 +{
88020 + if (likely(tsk->mm)) {
88021 + tsk->mm->pax_flags = flags;
88022 + return 0;
88023 + }
88024 + return -EINVAL;
88025 +}
88026 +#endif
88027 +
88028 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
88029 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
88030 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
88031 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
88032 +#endif
88033 +
88034 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
88035 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
88036 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
88037 +extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
88038 +
88039 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
88040 +extern void pax_track_stack(void);
88041 +#else
88042 +static inline void pax_track_stack(void) {}
88043 +#endif
88044 +
88045 /* Future-safe accessor for struct task_struct's cpus_allowed. */
88046 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
88047
88048 @@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
88049 #define PF_DUMPCORE 0x00000200 /* dumped core */
88050 #define PF_SIGNALED 0x00000400 /* killed by a signal */
88051 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
88052 -#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
88053 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
88054 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
88055 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
88056 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
88057 @@ -1978,7 +2075,9 @@ void yield(void);
88058 extern struct exec_domain default_exec_domain;
88059
88060 union thread_union {
88061 +#ifndef CONFIG_X86
88062 struct thread_info thread_info;
88063 +#endif
88064 unsigned long stack[THREAD_SIZE/sizeof(long)];
88065 };
88066
88067 @@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns;
88068 */
88069
88070 extern struct task_struct *find_task_by_vpid(pid_t nr);
88071 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
88072 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
88073 struct pid_namespace *ns);
88074
88075 @@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
88076 extern void exit_itimers(struct signal_struct *);
88077 extern void flush_itimer_signals(void);
88078
88079 -extern NORET_TYPE void do_group_exit(int);
88080 +extern __noreturn void do_group_exit(int);
88081
88082 extern void daemonize(const char *, ...);
88083 extern int allow_signal(int);
88084 @@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
88085
88086 #endif
88087
88088 -static inline int object_is_on_stack(void *obj)
88089 +static inline int object_starts_on_stack(void *obj)
88090 {
88091 - void *stack = task_stack_page(current);
88092 + const void *stack = task_stack_page(current);
88093
88094 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
88095 }
88096
88097 +#ifdef CONFIG_PAX_USERCOPY
88098 +extern int object_is_on_stack(const void *obj, unsigned long len);
88099 +#endif
88100 +
88101 extern void thread_info_cache_init(void);
88102
88103 #ifdef CONFIG_DEBUG_STACK_USAGE
88104 @@ -2616,6 +2720,23 @@ static inline unsigned long rlimit_max(unsigned int limit)
88105 return task_rlimit_max(current, limit);
88106 }
88107
88108 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
88109 +DECLARE_PER_CPU(u64, exec_counter);
88110 +static inline void increment_exec_counter(void)
88111 +{
88112 + unsigned int cpu;
88113 + u64 *exec_id_ptr;
88114 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
88115 + cpu = get_cpu();
88116 + exec_id_ptr = &per_cpu(exec_counter, cpu);
88117 + *exec_id_ptr += 1ULL << 16;
88118 + current->exec_id = *exec_id_ptr;
88119 + put_cpu();
88120 +}
88121 +#else
88122 +static inline void increment_exec_counter(void) {}
88123 +#endif
88124 +
88125 #endif /* __KERNEL__ */
88126
88127 #endif
88128 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
88129 index 1ee2c05..81b7ec4 100644
88130 --- a/include/linux/screen_info.h
88131 +++ b/include/linux/screen_info.h
88132 @@ -42,7 +42,8 @@ struct screen_info {
88133 __u16 pages; /* 0x32 */
88134 __u16 vesa_attributes; /* 0x34 */
88135 __u32 capabilities; /* 0x36 */
88136 - __u8 _reserved[6]; /* 0x3a */
88137 + __u16 vesapm_size; /* 0x3a */
88138 + __u8 _reserved[4]; /* 0x3c */
88139 } __attribute__((packed));
88140
88141 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
88142 diff --git a/include/linux/security.h b/include/linux/security.h
88143 index d40d23f..d739b08 100644
88144 --- a/include/linux/security.h
88145 +++ b/include/linux/security.h
88146 @@ -34,6 +34,7 @@
88147 #include <linux/key.h>
88148 #include <linux/xfrm.h>
88149 #include <linux/gfp.h>
88150 +#include <linux/grsecurity.h>
88151 #include <net/flow.h>
88152
88153 /* Maximum number of letters for an LSM name string */
88154 @@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
88155 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
88156 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
88157 extern int cap_task_setnice(struct task_struct *p, int nice);
88158 -extern int cap_syslog(int type);
88159 +extern int cap_syslog(int type, bool from_file);
88160 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
88161
88162 struct msghdr;
88163 @@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
88164 * logging to the console.
88165 * See the syslog(2) manual page for an explanation of the @type values.
88166 * @type contains the type of action.
88167 + * @from_file indicates the context of action (if it came from /proc).
88168 * Return 0 if permission is granted.
88169 * @settime:
88170 * Check permission to change the system time.
88171 @@ -1445,7 +1447,7 @@ struct security_operations {
88172 int (*sysctl) (struct ctl_table *table, int op);
88173 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
88174 int (*quota_on) (struct dentry *dentry);
88175 - int (*syslog) (int type);
88176 + int (*syslog) (int type, bool from_file);
88177 int (*settime) (struct timespec *ts, struct timezone *tz);
88178 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
88179
88180 @@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
88181 int security_sysctl(struct ctl_table *table, int op);
88182 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
88183 int security_quota_on(struct dentry *dentry);
88184 -int security_syslog(int type);
88185 +int security_syslog(int type, bool from_file);
88186 int security_settime(struct timespec *ts, struct timezone *tz);
88187 int security_vm_enough_memory(long pages);
88188 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
88189 @@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
88190 return 0;
88191 }
88192
88193 -static inline int security_syslog(int type)
88194 +static inline int security_syslog(int type, bool from_file)
88195 {
88196 - return cap_syslog(type);
88197 + return cap_syslog(type, from_file);
88198 }
88199
88200 static inline int security_settime(struct timespec *ts, struct timezone *tz)
88201 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
88202 index 8366d8f..cc5f9d6 100644
88203 --- a/include/linux/seq_file.h
88204 +++ b/include/linux/seq_file.h
88205 @@ -23,6 +23,9 @@ struct seq_file {
88206 u64 version;
88207 struct mutex lock;
88208 const struct seq_operations *op;
88209 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
88210 + u64 exec_id;
88211 +#endif
88212 void *private;
88213 };
88214
88215 @@ -32,6 +35,7 @@ struct seq_operations {
88216 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
88217 int (*show) (struct seq_file *m, void *v);
88218 };
88219 +typedef struct seq_operations __no_const seq_operations_no_const;
88220
88221 #define SEQ_SKIP 1
88222
88223 diff --git a/include/linux/shm.h b/include/linux/shm.h
88224 index eca6235..c7417ed 100644
88225 --- a/include/linux/shm.h
88226 +++ b/include/linux/shm.h
88227 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
88228 pid_t shm_cprid;
88229 pid_t shm_lprid;
88230 struct user_struct *mlock_user;
88231 +#ifdef CONFIG_GRKERNSEC
88232 + time_t shm_createtime;
88233 + pid_t shm_lapid;
88234 +#endif
88235 };
88236
88237 /* shm_mode upper byte flags */
88238 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
88239 index bcdd660..fd2e332 100644
88240 --- a/include/linux/skbuff.h
88241 +++ b/include/linux/skbuff.h
88242 @@ -14,6 +14,7 @@
88243 #ifndef _LINUX_SKBUFF_H
88244 #define _LINUX_SKBUFF_H
88245
88246 +#include <linux/const.h>
88247 #include <linux/kernel.h>
88248 #include <linux/kmemcheck.h>
88249 #include <linux/compiler.h>
88250 @@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
88251 */
88252 static inline int skb_queue_empty(const struct sk_buff_head *list)
88253 {
88254 - return list->next == (struct sk_buff *)list;
88255 + return list->next == (const struct sk_buff *)list;
88256 }
88257
88258 /**
88259 @@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
88260 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
88261 const struct sk_buff *skb)
88262 {
88263 - return (skb->next == (struct sk_buff *) list);
88264 + return (skb->next == (const struct sk_buff *) list);
88265 }
88266
88267 /**
88268 @@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
88269 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
88270 const struct sk_buff *skb)
88271 {
88272 - return (skb->prev == (struct sk_buff *) list);
88273 + return (skb->prev == (const struct sk_buff *) list);
88274 }
88275
88276 /**
88277 @@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
88278 * headroom, you should not reduce this.
88279 */
88280 #ifndef NET_SKB_PAD
88281 -#define NET_SKB_PAD 32
88282 +#define NET_SKB_PAD (_AC(32,UL))
88283 #endif
88284
88285 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
88286 @@ -1489,6 +1490,22 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
88287 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
88288 }
88289
88290 +static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
88291 + unsigned int length, gfp_t gfp)
88292 +{
88293 + struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
88294 +
88295 + if (NET_IP_ALIGN && skb)
88296 + skb_reserve(skb, NET_IP_ALIGN);
88297 + return skb;
88298 +}
88299 +
88300 +static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
88301 + unsigned int length)
88302 +{
88303 + return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
88304 +}
88305 +
88306 extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
88307
88308 /**
88309 diff --git a/include/linux/slab.h b/include/linux/slab.h
88310 index 2da8372..9e01add 100644
88311 --- a/include/linux/slab.h
88312 +++ b/include/linux/slab.h
88313 @@ -11,12 +11,20 @@
88314
88315 #include <linux/gfp.h>
88316 #include <linux/types.h>
88317 +#include <linux/err.h>
88318
88319 /*
88320 * Flags to pass to kmem_cache_create().
88321 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
88322 */
88323 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
88324 +
88325 +#ifdef CONFIG_PAX_USERCOPY
88326 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
88327 +#else
88328 +#define SLAB_USERCOPY 0x00000000UL
88329 +#endif
88330 +
88331 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
88332 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
88333 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
88334 @@ -82,10 +90,13 @@
88335 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
88336 * Both make kfree a no-op.
88337 */
88338 -#define ZERO_SIZE_PTR ((void *)16)
88339 +#define ZERO_SIZE_PTR \
88340 +({ \
88341 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
88342 + (void *)(-MAX_ERRNO-1L); \
88343 +})
88344
88345 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
88346 - (unsigned long)ZERO_SIZE_PTR)
88347 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
88348
88349 /*
88350 * struct kmem_cache related prototypes
88351 @@ -133,11 +144,12 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
88352 /*
88353 * Common kmalloc functions provided by all allocators
88354 */
88355 -void * __must_check __krealloc(const void *, size_t, gfp_t);
88356 -void * __must_check krealloc(const void *, size_t, gfp_t);
88357 +void * __must_check __krealloc(const void *, size_t, gfp_t) __size_overflow(2);
88358 +void * __must_check krealloc(const void *, size_t, gfp_t) __size_overflow(2);
88359 void kfree(const void *);
88360 void kzfree(const void *);
88361 size_t ksize(const void *);
88362 +void check_object_size(const void *ptr, unsigned long n, bool to);
88363
88364 /*
88365 * Allocator specific definitions. These are mainly used to establish optimized
88366 @@ -263,7 +275,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
88367 * request comes from.
88368 */
88369 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
88370 -extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
88371 +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
88372 #define kmalloc_track_caller(size, flags) \
88373 __kmalloc_track_caller(size, flags, _RET_IP_)
88374 #else
88375 @@ -281,7 +293,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
88376 * allocation request comes from.
88377 */
88378 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
88379 -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
88380 +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
88381 #define kmalloc_node_track_caller(size, flags, node) \
88382 __kmalloc_node_track_caller(size, flags, node, \
88383 _RET_IP_)
88384 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
88385 index 850d057..33bad48 100644
88386 --- a/include/linux/slab_def.h
88387 +++ b/include/linux/slab_def.h
88388 @@ -69,10 +69,10 @@ struct kmem_cache {
88389 unsigned long node_allocs;
88390 unsigned long node_frees;
88391 unsigned long node_overflow;
88392 - atomic_t allochit;
88393 - atomic_t allocmiss;
88394 - atomic_t freehit;
88395 - atomic_t freemiss;
88396 + atomic_unchecked_t allochit;
88397 + atomic_unchecked_t allocmiss;
88398 + atomic_unchecked_t freehit;
88399 + atomic_unchecked_t freemiss;
88400
88401 /*
88402 * If debugging is enabled, then the allocator can add additional
88403 @@ -108,7 +108,7 @@ struct cache_sizes {
88404 extern struct cache_sizes malloc_sizes[];
88405
88406 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
88407 -void *__kmalloc(size_t size, gfp_t flags);
88408 +void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88409
88410 #ifdef CONFIG_KMEMTRACE
88411 extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
88412 @@ -125,6 +125,7 @@ static inline size_t slab_buffer_size(struct kmem_cache *cachep)
88413 }
88414 #endif
88415
88416 +static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88417 static __always_inline void *kmalloc(size_t size, gfp_t flags)
88418 {
88419 struct kmem_cache *cachep;
88420 @@ -163,7 +164,7 @@ found:
88421 }
88422
88423 #ifdef CONFIG_NUMA
88424 -extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
88425 +extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88426 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
88427
88428 #ifdef CONFIG_KMEMTRACE
88429 @@ -180,6 +181,7 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
88430 }
88431 #endif
88432
88433 +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88434 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88435 {
88436 struct kmem_cache *cachep;
88437 diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
88438 index 0ec00b3..65e7e0e 100644
88439 --- a/include/linux/slob_def.h
88440 +++ b/include/linux/slob_def.h
88441 @@ -9,8 +9,9 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
88442 return kmem_cache_alloc_node(cachep, flags, -1);
88443 }
88444
88445 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
88446 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88447
88448 +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88449 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88450 {
88451 return __kmalloc_node(size, flags, node);
88452 @@ -24,11 +25,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88453 * kmalloc is the normal method of allocating memory
88454 * in the kernel.
88455 */
88456 +static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88457 static __always_inline void *kmalloc(size_t size, gfp_t flags)
88458 {
88459 return __kmalloc_node(size, flags, -1);
88460 }
88461
88462 +static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88463 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
88464 {
88465 return kmalloc(size, flags);
88466 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
88467 index 5ad70a6..8f0e2c8 100644
88468 --- a/include/linux/slub_def.h
88469 +++ b/include/linux/slub_def.h
88470 @@ -86,7 +86,7 @@ struct kmem_cache {
88471 struct kmem_cache_order_objects max;
88472 struct kmem_cache_order_objects min;
88473 gfp_t allocflags; /* gfp flags to use on each alloc */
88474 - int refcount; /* Refcount for slab cache destroy */
88475 + atomic_t refcount; /* Refcount for slab cache destroy */
88476 void (*ctor)(void *);
88477 int inuse; /* Offset to metadata */
88478 int align; /* Alignment */
88479 @@ -197,6 +197,7 @@ static __always_inline int kmalloc_index(size_t size)
88480 * This ought to end up with a global pointer to the right cache
88481 * in kmalloc_caches.
88482 */
88483 +static __always_inline struct kmem_cache *kmalloc_slab(size_t size) __size_overflow(1);
88484 static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
88485 {
88486 int index = kmalloc_index(size);
88487 @@ -215,7 +216,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
88488 #endif
88489
88490 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
88491 -void *__kmalloc(size_t size, gfp_t flags);
88492 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
88493
88494 #ifdef CONFIG_KMEMTRACE
88495 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
88496 @@ -227,6 +228,7 @@ kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
88497 }
88498 #endif
88499
88500 +static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
88501 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
88502 {
88503 unsigned int order = get_order(size);
88504 @@ -238,6 +240,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
88505 return ret;
88506 }
88507
88508 +static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88509 static __always_inline void *kmalloc(size_t size, gfp_t flags)
88510 {
88511 void *ret;
88512 @@ -263,7 +266,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
88513 }
88514
88515 #ifdef CONFIG_NUMA
88516 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
88517 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88518 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
88519
88520 #ifdef CONFIG_KMEMTRACE
88521 @@ -280,6 +283,7 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *s,
88522 }
88523 #endif
88524
88525 +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88526 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88527 {
88528 void *ret;
88529 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
88530 index 67ad11f..0bbd8af 100644
88531 --- a/include/linux/sonet.h
88532 +++ b/include/linux/sonet.h
88533 @@ -61,7 +61,7 @@ struct sonet_stats {
88534 #include <asm/atomic.h>
88535
88536 struct k_sonet_stats {
88537 -#define __HANDLE_ITEM(i) atomic_t i
88538 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
88539 __SONET_ITEMS
88540 #undef __HANDLE_ITEM
88541 };
88542 diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
88543 index 6f52b4d..5500323 100644
88544 --- a/include/linux/sunrpc/cache.h
88545 +++ b/include/linux/sunrpc/cache.h
88546 @@ -125,7 +125,7 @@ struct cache_detail {
88547 */
88548 struct cache_req {
88549 struct cache_deferred_req *(*defer)(struct cache_req *req);
88550 -};
88551 +} __no_const;
88552 /* this must be embedded in a deferred_request that is being
88553 * delayed awaiting cache-fill
88554 */
88555 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
88556 index 8ed9642..101ceab 100644
88557 --- a/include/linux/sunrpc/clnt.h
88558 +++ b/include/linux/sunrpc/clnt.h
88559 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
88560 {
88561 switch (sap->sa_family) {
88562 case AF_INET:
88563 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
88564 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
88565 case AF_INET6:
88566 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
88567 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
88568 }
88569 return 0;
88570 }
88571 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
88572 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
88573 const struct sockaddr *src)
88574 {
88575 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
88576 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
88577 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
88578
88579 dsin->sin_family = ssin->sin_family;
88580 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
88581 if (sa->sa_family != AF_INET6)
88582 return 0;
88583
88584 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
88585 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
88586 }
88587
88588 #endif /* __KERNEL__ */
88589 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
88590 index c14fe86..393245e 100644
88591 --- a/include/linux/sunrpc/svc_rdma.h
88592 +++ b/include/linux/sunrpc/svc_rdma.h
88593 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
88594 extern unsigned int svcrdma_max_requests;
88595 extern unsigned int svcrdma_max_req_size;
88596
88597 -extern atomic_t rdma_stat_recv;
88598 -extern atomic_t rdma_stat_read;
88599 -extern atomic_t rdma_stat_write;
88600 -extern atomic_t rdma_stat_sq_starve;
88601 -extern atomic_t rdma_stat_rq_starve;
88602 -extern atomic_t rdma_stat_rq_poll;
88603 -extern atomic_t rdma_stat_rq_prod;
88604 -extern atomic_t rdma_stat_sq_poll;
88605 -extern atomic_t rdma_stat_sq_prod;
88606 +extern atomic_unchecked_t rdma_stat_recv;
88607 +extern atomic_unchecked_t rdma_stat_read;
88608 +extern atomic_unchecked_t rdma_stat_write;
88609 +extern atomic_unchecked_t rdma_stat_sq_starve;
88610 +extern atomic_unchecked_t rdma_stat_rq_starve;
88611 +extern atomic_unchecked_t rdma_stat_rq_poll;
88612 +extern atomic_unchecked_t rdma_stat_rq_prod;
88613 +extern atomic_unchecked_t rdma_stat_sq_poll;
88614 +extern atomic_unchecked_t rdma_stat_sq_prod;
88615
88616 #define RPCRDMA_VERSION 1
88617
88618 diff --git a/include/linux/suspend.h b/include/linux/suspend.h
88619 index 5e781d8..1e62818 100644
88620 --- a/include/linux/suspend.h
88621 +++ b/include/linux/suspend.h
88622 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
88623 * which require special recovery actions in that situation.
88624 */
88625 struct platform_suspend_ops {
88626 - int (*valid)(suspend_state_t state);
88627 - int (*begin)(suspend_state_t state);
88628 - int (*prepare)(void);
88629 - int (*prepare_late)(void);
88630 - int (*enter)(suspend_state_t state);
88631 - void (*wake)(void);
88632 - void (*finish)(void);
88633 - void (*end)(void);
88634 - void (*recover)(void);
88635 + int (* const valid)(suspend_state_t state);
88636 + int (* const begin)(suspend_state_t state);
88637 + int (* const prepare)(void);
88638 + int (* const prepare_late)(void);
88639 + int (* const enter)(suspend_state_t state);
88640 + void (* const wake)(void);
88641 + void (* const finish)(void);
88642 + void (* const end)(void);
88643 + void (* const recover)(void);
88644 };
88645
88646 #ifdef CONFIG_SUSPEND
88647 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
88648 * suspend_set_ops - set platform dependent suspend operations
88649 * @ops: The new suspend operations to set.
88650 */
88651 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
88652 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
88653 extern int suspend_valid_only_mem(suspend_state_t state);
88654
88655 /**
88656 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
88657 #else /* !CONFIG_SUSPEND */
88658 #define suspend_valid_only_mem NULL
88659
88660 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
88661 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
88662 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
88663 #endif /* !CONFIG_SUSPEND */
88664
88665 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
88666 * platforms which require special recovery actions in that situation.
88667 */
88668 struct platform_hibernation_ops {
88669 - int (*begin)(void);
88670 - void (*end)(void);
88671 - int (*pre_snapshot)(void);
88672 - void (*finish)(void);
88673 - int (*prepare)(void);
88674 - int (*enter)(void);
88675 - void (*leave)(void);
88676 - int (*pre_restore)(void);
88677 - void (*restore_cleanup)(void);
88678 - void (*recover)(void);
88679 + int (* const begin)(void);
88680 + void (* const end)(void);
88681 + int (* const pre_snapshot)(void);
88682 + void (* const finish)(void);
88683 + int (* const prepare)(void);
88684 + int (* const enter)(void);
88685 + void (* const leave)(void);
88686 + int (* const pre_restore)(void);
88687 + void (* const restore_cleanup)(void);
88688 + void (* const recover)(void);
88689 };
88690
88691 #ifdef CONFIG_HIBERNATION
88692 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
88693 extern void swsusp_unset_page_free(struct page *);
88694 extern unsigned long get_safe_page(gfp_t gfp_mask);
88695
88696 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
88697 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
88698 extern int hibernate(void);
88699 extern bool system_entering_hibernation(void);
88700 #else /* CONFIG_HIBERNATION */
88701 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
88702 static inline void swsusp_set_page_free(struct page *p) {}
88703 static inline void swsusp_unset_page_free(struct page *p) {}
88704
88705 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
88706 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
88707 static inline int hibernate(void) { return -ENOSYS; }
88708 static inline bool system_entering_hibernation(void) { return false; }
88709 #endif /* CONFIG_HIBERNATION */
88710 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
88711 index 0eb6942..a805cb6 100644
88712 --- a/include/linux/sysctl.h
88713 +++ b/include/linux/sysctl.h
88714 @@ -164,7 +164,11 @@ enum
88715 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
88716 };
88717
88718 -
88719 +#ifdef CONFIG_PAX_SOFTMODE
88720 +enum {
88721 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
88722 +};
88723 +#endif
88724
88725 /* CTL_VM names: */
88726 enum
88727 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
88728
88729 extern int proc_dostring(struct ctl_table *, int,
88730 void __user *, size_t *, loff_t *);
88731 +extern int proc_dostring_modpriv(struct ctl_table *, int,
88732 + void __user *, size_t *, loff_t *);
88733 extern int proc_dointvec(struct ctl_table *, int,
88734 void __user *, size_t *, loff_t *);
88735 extern int proc_dointvec_minmax(struct ctl_table *, int,
88736 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
88737
88738 extern ctl_handler sysctl_data;
88739 extern ctl_handler sysctl_string;
88740 +extern ctl_handler sysctl_string_modpriv;
88741 extern ctl_handler sysctl_intvec;
88742 extern ctl_handler sysctl_jiffies;
88743 extern ctl_handler sysctl_ms_jiffies;
88744 diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
88745 index 9d68fed..71f02cc 100644
88746 --- a/include/linux/sysfs.h
88747 +++ b/include/linux/sysfs.h
88748 @@ -75,8 +75,8 @@ struct bin_attribute {
88749 };
88750
88751 struct sysfs_ops {
88752 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
88753 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
88754 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
88755 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
88756 };
88757
88758 struct sysfs_dirent;
88759 diff --git a/include/linux/syslog.h b/include/linux/syslog.h
88760 new file mode 100644
88761 index 0000000..3891139
88762 --- /dev/null
88763 +++ b/include/linux/syslog.h
88764 @@ -0,0 +1,52 @@
88765 +/* Syslog internals
88766 + *
88767 + * Copyright 2010 Canonical, Ltd.
88768 + * Author: Kees Cook <kees.cook@canonical.com>
88769 + *
88770 + * This program is free software; you can redistribute it and/or modify
88771 + * it under the terms of the GNU General Public License as published by
88772 + * the Free Software Foundation; either version 2, or (at your option)
88773 + * any later version.
88774 + *
88775 + * This program is distributed in the hope that it will be useful,
88776 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
88777 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
88778 + * GNU General Public License for more details.
88779 + *
88780 + * You should have received a copy of the GNU General Public License
88781 + * along with this program; see the file COPYING. If not, write to
88782 + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
88783 + */
88784 +
88785 +#ifndef _LINUX_SYSLOG_H
88786 +#define _LINUX_SYSLOG_H
88787 +
88788 +/* Close the log. Currently a NOP. */
88789 +#define SYSLOG_ACTION_CLOSE 0
88790 +/* Open the log. Currently a NOP. */
88791 +#define SYSLOG_ACTION_OPEN 1
88792 +/* Read from the log. */
88793 +#define SYSLOG_ACTION_READ 2
88794 +/* Read all messages remaining in the ring buffer. */
88795 +#define SYSLOG_ACTION_READ_ALL 3
88796 +/* Read and clear all messages remaining in the ring buffer */
88797 +#define SYSLOG_ACTION_READ_CLEAR 4
88798 +/* Clear ring buffer. */
88799 +#define SYSLOG_ACTION_CLEAR 5
88800 +/* Disable printk's to console */
88801 +#define SYSLOG_ACTION_CONSOLE_OFF 6
88802 +/* Enable printk's to console */
88803 +#define SYSLOG_ACTION_CONSOLE_ON 7
88804 +/* Set level of messages printed to console */
88805 +#define SYSLOG_ACTION_CONSOLE_LEVEL 8
88806 +/* Return number of unread characters in the log buffer */
88807 +#define SYSLOG_ACTION_SIZE_UNREAD 9
88808 +/* Return size of the log buffer */
88809 +#define SYSLOG_ACTION_SIZE_BUFFER 10
88810 +
88811 +#define SYSLOG_FROM_CALL 0
88812 +#define SYSLOG_FROM_FILE 1
88813 +
88814 +int do_syslog(int type, char __user *buf, int count, bool from_file);
88815 +
88816 +#endif /* _LINUX_SYSLOG_H */
88817 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
88818 index a8cc4e1..98d3b85 100644
88819 --- a/include/linux/thread_info.h
88820 +++ b/include/linux/thread_info.h
88821 @@ -23,7 +23,7 @@ struct restart_block {
88822 };
88823 /* For futex_wait and futex_wait_requeue_pi */
88824 struct {
88825 - u32 *uaddr;
88826 + u32 __user *uaddr;
88827 u32 val;
88828 u32 flags;
88829 u32 bitset;
88830 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
88831 index 1eb44a9..f582df3 100644
88832 --- a/include/linux/tracehook.h
88833 +++ b/include/linux/tracehook.h
88834 @@ -69,12 +69,12 @@ static inline int tracehook_expect_breakpoints(struct task_struct *task)
88835 /*
88836 * ptrace report for syscall entry and exit looks identical.
88837 */
88838 -static inline void ptrace_report_syscall(struct pt_regs *regs)
88839 +static inline int ptrace_report_syscall(struct pt_regs *regs)
88840 {
88841 int ptrace = task_ptrace(current);
88842
88843 if (!(ptrace & PT_PTRACED))
88844 - return;
88845 + return 0;
88846
88847 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
88848
88849 @@ -87,6 +87,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
88850 send_sig(current->exit_code, current, 1);
88851 current->exit_code = 0;
88852 }
88853 +
88854 + return fatal_signal_pending(current);
88855 }
88856
88857 /**
88858 @@ -111,8 +113,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
88859 static inline __must_check int tracehook_report_syscall_entry(
88860 struct pt_regs *regs)
88861 {
88862 - ptrace_report_syscall(regs);
88863 - return 0;
88864 + return ptrace_report_syscall(regs);
88865 }
88866
88867 /**
88868 diff --git a/include/linux/tty.h b/include/linux/tty.h
88869 index e9c57e9..ee6d489 100644
88870 --- a/include/linux/tty.h
88871 +++ b/include/linux/tty.h
88872 @@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
88873 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
88874 extern void tty_ldisc_enable(struct tty_struct *tty);
88875
88876 -
88877 /* n_tty.c */
88878 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
88879
88880 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
88881 index 0c4ee9b..9f7c426 100644
88882 --- a/include/linux/tty_ldisc.h
88883 +++ b/include/linux/tty_ldisc.h
88884 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
88885
88886 struct module *owner;
88887
88888 - int refcount;
88889 + atomic_t refcount;
88890 };
88891
88892 struct tty_ldisc {
88893 diff --git a/include/linux/types.h b/include/linux/types.h
88894 index c42724f..d190eee 100644
88895 --- a/include/linux/types.h
88896 +++ b/include/linux/types.h
88897 @@ -191,10 +191,26 @@ typedef struct {
88898 volatile int counter;
88899 } atomic_t;
88900
88901 +#ifdef CONFIG_PAX_REFCOUNT
88902 +typedef struct {
88903 + volatile int counter;
88904 +} atomic_unchecked_t;
88905 +#else
88906 +typedef atomic_t atomic_unchecked_t;
88907 +#endif
88908 +
88909 #ifdef CONFIG_64BIT
88910 typedef struct {
88911 volatile long counter;
88912 } atomic64_t;
88913 +
88914 +#ifdef CONFIG_PAX_REFCOUNT
88915 +typedef struct {
88916 + volatile long counter;
88917 +} atomic64_unchecked_t;
88918 +#else
88919 +typedef atomic64_t atomic64_unchecked_t;
88920 +#endif
88921 #endif
88922
88923 struct ustat {
88924 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
88925 index 6b58367..57b150e 100644
88926 --- a/include/linux/uaccess.h
88927 +++ b/include/linux/uaccess.h
88928 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
88929 long ret; \
88930 mm_segment_t old_fs = get_fs(); \
88931 \
88932 - set_fs(KERNEL_DS); \
88933 pagefault_disable(); \
88934 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
88935 - pagefault_enable(); \
88936 + set_fs(KERNEL_DS); \
88937 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
88938 set_fs(old_fs); \
88939 + pagefault_enable(); \
88940 ret; \
88941 })
88942
88943 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
88944 * Safely read from address @src to the buffer at @dst. If a kernel fault
88945 * happens, handle that and return -EFAULT.
88946 */
88947 -extern long probe_kernel_read(void *dst, void *src, size_t size);
88948 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
88949
88950 /*
88951 * probe_kernel_write(): safely attempt to write to a location
88952 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
88953 * Safely write to address @dst from the buffer at @src. If a kernel fault
88954 * happens, handle that and return -EFAULT.
88955 */
88956 -extern long probe_kernel_write(void *dst, void *src, size_t size);
88957 +extern long probe_kernel_write(void *dst, const void *src, size_t size) __size_overflow(3);
88958
88959 #endif /* __LINUX_UACCESS_H__ */
88960 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
88961 index 99c1b4d..bb94261 100644
88962 --- a/include/linux/unaligned/access_ok.h
88963 +++ b/include/linux/unaligned/access_ok.h
88964 @@ -6,32 +6,32 @@
88965
88966 static inline u16 get_unaligned_le16(const void *p)
88967 {
88968 - return le16_to_cpup((__le16 *)p);
88969 + return le16_to_cpup((const __le16 *)p);
88970 }
88971
88972 static inline u32 get_unaligned_le32(const void *p)
88973 {
88974 - return le32_to_cpup((__le32 *)p);
88975 + return le32_to_cpup((const __le32 *)p);
88976 }
88977
88978 static inline u64 get_unaligned_le64(const void *p)
88979 {
88980 - return le64_to_cpup((__le64 *)p);
88981 + return le64_to_cpup((const __le64 *)p);
88982 }
88983
88984 static inline u16 get_unaligned_be16(const void *p)
88985 {
88986 - return be16_to_cpup((__be16 *)p);
88987 + return be16_to_cpup((const __be16 *)p);
88988 }
88989
88990 static inline u32 get_unaligned_be32(const void *p)
88991 {
88992 - return be32_to_cpup((__be32 *)p);
88993 + return be32_to_cpup((const __be32 *)p);
88994 }
88995
88996 static inline u64 get_unaligned_be64(const void *p)
88997 {
88998 - return be64_to_cpup((__be64 *)p);
88999 + return be64_to_cpup((const __be64 *)p);
89000 }
89001
89002 static inline void put_unaligned_le16(u16 val, void *p)
89003 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
89004 index 79b9837..b5a56f9 100644
89005 --- a/include/linux/vermagic.h
89006 +++ b/include/linux/vermagic.h
89007 @@ -26,9 +26,35 @@
89008 #define MODULE_ARCH_VERMAGIC ""
89009 #endif
89010
89011 +#ifdef CONFIG_PAX_REFCOUNT
89012 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
89013 +#else
89014 +#define MODULE_PAX_REFCOUNT ""
89015 +#endif
89016 +
89017 +#ifdef CONSTIFY_PLUGIN
89018 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
89019 +#else
89020 +#define MODULE_CONSTIFY_PLUGIN ""
89021 +#endif
89022 +
89023 +#ifdef STACKLEAK_PLUGIN
89024 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
89025 +#else
89026 +#define MODULE_STACKLEAK_PLUGIN ""
89027 +#endif
89028 +
89029 +#ifdef CONFIG_GRKERNSEC
89030 +#define MODULE_GRSEC "GRSEC "
89031 +#else
89032 +#define MODULE_GRSEC ""
89033 +#endif
89034 +
89035 #define VERMAGIC_STRING \
89036 UTS_RELEASE " " \
89037 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
89038 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
89039 - MODULE_ARCH_VERMAGIC
89040 + MODULE_ARCH_VERMAGIC \
89041 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
89042 + MODULE_GRSEC
89043
89044 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
89045 index 819a634..b99e71b 100644
89046 --- a/include/linux/vmalloc.h
89047 +++ b/include/linux/vmalloc.h
89048 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
89049 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
89050 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
89051 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
89052 +
89053 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
89054 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
89055 +#endif
89056 +
89057 /* bits [20..32] reserved for arch specific ioremap internals */
89058
89059 /*
89060 @@ -51,13 +56,13 @@ static inline void vmalloc_init(void)
89061 }
89062 #endif
89063
89064 -extern void *vmalloc(unsigned long size);
89065 -extern void *vmalloc_user(unsigned long size);
89066 -extern void *vmalloc_node(unsigned long size, int node);
89067 -extern void *vmalloc_exec(unsigned long size);
89068 -extern void *vmalloc_32(unsigned long size);
89069 -extern void *vmalloc_32_user(unsigned long size);
89070 -extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
89071 +extern void *vmalloc(unsigned long size) __size_overflow(1);
89072 +extern void *vmalloc_user(unsigned long size) __size_overflow(1);
89073 +extern void *vmalloc_node(unsigned long size, int node) __size_overflow(1);
89074 +extern void *vmalloc_exec(unsigned long size) __size_overflow(1);
89075 +extern void *vmalloc_32(unsigned long size) __size_overflow(1);
89076 +extern void *vmalloc_32_user(unsigned long size) __size_overflow(1);
89077 +extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __size_overflow(1);
89078 extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
89079 pgprot_t prot);
89080 extern void vfree(const void *addr);
89081 @@ -106,8 +111,8 @@ extern struct vm_struct *alloc_vm_area(size_t size);
89082 extern void free_vm_area(struct vm_struct *area);
89083
89084 /* for /dev/kmem */
89085 -extern long vread(char *buf, char *addr, unsigned long count);
89086 -extern long vwrite(char *buf, char *addr, unsigned long count);
89087 +extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
89088 +extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
89089
89090 /*
89091 * Internals. Dont't use..
89092 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
89093 index 13070d6..aa4159a 100644
89094 --- a/include/linux/vmstat.h
89095 +++ b/include/linux/vmstat.h
89096 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
89097 /*
89098 * Zone based page accounting with per cpu differentials.
89099 */
89100 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
89101 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
89102
89103 static inline void zone_page_state_add(long x, struct zone *zone,
89104 enum zone_stat_item item)
89105 {
89106 - atomic_long_add(x, &zone->vm_stat[item]);
89107 - atomic_long_add(x, &vm_stat[item]);
89108 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
89109 + atomic_long_add_unchecked(x, &vm_stat[item]);
89110 }
89111
89112 static inline unsigned long global_page_state(enum zone_stat_item item)
89113 {
89114 - long x = atomic_long_read(&vm_stat[item]);
89115 + long x = atomic_long_read_unchecked(&vm_stat[item]);
89116 #ifdef CONFIG_SMP
89117 if (x < 0)
89118 x = 0;
89119 @@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
89120 static inline unsigned long zone_page_state(struct zone *zone,
89121 enum zone_stat_item item)
89122 {
89123 - long x = atomic_long_read(&zone->vm_stat[item]);
89124 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
89125 #ifdef CONFIG_SMP
89126 if (x < 0)
89127 x = 0;
89128 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
89129 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
89130 enum zone_stat_item item)
89131 {
89132 - long x = atomic_long_read(&zone->vm_stat[item]);
89133 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
89134
89135 #ifdef CONFIG_SMP
89136 int cpu;
89137 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
89138
89139 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
89140 {
89141 - atomic_long_inc(&zone->vm_stat[item]);
89142 - atomic_long_inc(&vm_stat[item]);
89143 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
89144 + atomic_long_inc_unchecked(&vm_stat[item]);
89145 }
89146
89147 static inline void __inc_zone_page_state(struct page *page,
89148 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
89149
89150 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
89151 {
89152 - atomic_long_dec(&zone->vm_stat[item]);
89153 - atomic_long_dec(&vm_stat[item]);
89154 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
89155 + atomic_long_dec_unchecked(&vm_stat[item]);
89156 }
89157
89158 static inline void __dec_zone_page_state(struct page *page,
89159 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
89160 index 5c84af8..1a3b6e2 100644
89161 --- a/include/linux/xattr.h
89162 +++ b/include/linux/xattr.h
89163 @@ -33,6 +33,11 @@
89164 #define XATTR_USER_PREFIX "user."
89165 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
89166
89167 +/* User namespace */
89168 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
89169 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
89170 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
89171 +
89172 struct inode;
89173 struct dentry;
89174
89175 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
89176 index eed5fcc..5080d24 100644
89177 --- a/include/media/saa7146_vv.h
89178 +++ b/include/media/saa7146_vv.h
89179 @@ -167,7 +167,7 @@ struct saa7146_ext_vv
89180 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
89181
89182 /* the extension can override this */
89183 - struct v4l2_ioctl_ops ops;
89184 + v4l2_ioctl_ops_no_const ops;
89185 /* pointer to the saa7146 core ops */
89186 const struct v4l2_ioctl_ops *core_ops;
89187
89188 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
89189 index 73c9867..2da8837 100644
89190 --- a/include/media/v4l2-dev.h
89191 +++ b/include/media/v4l2-dev.h
89192 @@ -34,7 +34,7 @@ struct v4l2_device;
89193 #define V4L2_FL_UNREGISTERED (0)
89194
89195 struct v4l2_file_operations {
89196 - struct module *owner;
89197 + struct module * const owner;
89198 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
89199 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
89200 unsigned int (*poll) (struct file *, struct poll_table_struct *);
89201 @@ -46,6 +46,7 @@ struct v4l2_file_operations {
89202 int (*open) (struct file *);
89203 int (*release) (struct file *);
89204 };
89205 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
89206
89207 /*
89208 * Newer version of video_device, handled by videodev2.c
89209 diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
89210 index 5d5d550..f559ef1 100644
89211 --- a/include/media/v4l2-device.h
89212 +++ b/include/media/v4l2-device.h
89213 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
89214 this function returns 0. If the name ends with a digit (e.g. cx18),
89215 then the name will be set to cx18-0 since cx180 looks really odd. */
89216 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
89217 - atomic_t *instance);
89218 + atomic_unchecked_t *instance);
89219
89220 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
89221 Since the parent disappears this ensures that v4l2_dev doesn't have an
89222 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
89223 index 7a4529d..7244290 100644
89224 --- a/include/media/v4l2-ioctl.h
89225 +++ b/include/media/v4l2-ioctl.h
89226 @@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
89227 long (*vidioc_default) (struct file *file, void *fh,
89228 int cmd, void *arg);
89229 };
89230 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
89231
89232
89233 /* v4l debugging and diagnostics */
89234 diff --git a/include/net/flow.h b/include/net/flow.h
89235 index 809970b..c3df4f3 100644
89236 --- a/include/net/flow.h
89237 +++ b/include/net/flow.h
89238 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
89239 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
89240 u8 dir, flow_resolve_t resolver);
89241 extern void flow_cache_flush(void);
89242 -extern atomic_t flow_cache_genid;
89243 +extern atomic_unchecked_t flow_cache_genid;
89244
89245 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
89246 {
89247 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
89248 index 15e1f8fe..668837c 100644
89249 --- a/include/net/inetpeer.h
89250 +++ b/include/net/inetpeer.h
89251 @@ -24,7 +24,7 @@ struct inet_peer
89252 __u32 dtime; /* the time of last use of not
89253 * referenced entries */
89254 atomic_t refcnt;
89255 - atomic_t rid; /* Frag reception counter */
89256 + atomic_unchecked_t rid; /* Frag reception counter */
89257 __u32 tcp_ts;
89258 unsigned long tcp_ts_stamp;
89259 };
89260 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
89261 index 98978e7..2243a3d 100644
89262 --- a/include/net/ip_vs.h
89263 +++ b/include/net/ip_vs.h
89264 @@ -365,7 +365,7 @@ struct ip_vs_conn {
89265 struct ip_vs_conn *control; /* Master control connection */
89266 atomic_t n_control; /* Number of controlled ones */
89267 struct ip_vs_dest *dest; /* real server */
89268 - atomic_t in_pkts; /* incoming packet counter */
89269 + atomic_unchecked_t in_pkts; /* incoming packet counter */
89270
89271 /* packet transmitter for different forwarding methods. If it
89272 mangles the packet, it must return NF_DROP or better NF_STOLEN,
89273 @@ -466,7 +466,7 @@ struct ip_vs_dest {
89274 union nf_inet_addr addr; /* IP address of the server */
89275 __be16 port; /* port number of the server */
89276 volatile unsigned flags; /* dest status flags */
89277 - atomic_t conn_flags; /* flags to copy to conn */
89278 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
89279 atomic_t weight; /* server weight */
89280
89281 atomic_t refcnt; /* reference counter */
89282 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
89283 index 69b610a..fe3962c 100644
89284 --- a/include/net/irda/ircomm_core.h
89285 +++ b/include/net/irda/ircomm_core.h
89286 @@ -51,7 +51,7 @@ typedef struct {
89287 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
89288 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
89289 struct ircomm_info *);
89290 -} call_t;
89291 +} __no_const call_t;
89292
89293 struct ircomm_cb {
89294 irda_queue_t queue;
89295 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
89296 index eea2e61..08c692d 100644
89297 --- a/include/net/irda/ircomm_tty.h
89298 +++ b/include/net/irda/ircomm_tty.h
89299 @@ -35,6 +35,7 @@
89300 #include <linux/termios.h>
89301 #include <linux/timer.h>
89302 #include <linux/tty.h> /* struct tty_struct */
89303 +#include <asm/local.h>
89304
89305 #include <net/irda/irias_object.h>
89306 #include <net/irda/ircomm_core.h>
89307 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
89308 unsigned short close_delay;
89309 unsigned short closing_wait; /* time to wait before closing */
89310
89311 - int open_count;
89312 - int blocked_open; /* # of blocked opens */
89313 + local_t open_count;
89314 + local_t blocked_open; /* # of blocked opens */
89315
89316 /* Protect concurent access to :
89317 * o self->open_count
89318 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
89319 index f82a1e8..82d81e8 100644
89320 --- a/include/net/iucv/af_iucv.h
89321 +++ b/include/net/iucv/af_iucv.h
89322 @@ -87,7 +87,7 @@ struct iucv_sock {
89323 struct iucv_sock_list {
89324 struct hlist_head head;
89325 rwlock_t lock;
89326 - atomic_t autobind_name;
89327 + atomic_unchecked_t autobind_name;
89328 };
89329
89330 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
89331 diff --git a/include/net/lapb.h b/include/net/lapb.h
89332 index 96cb5dd..25e8d4f 100644
89333 --- a/include/net/lapb.h
89334 +++ b/include/net/lapb.h
89335 @@ -95,7 +95,7 @@ struct lapb_cb {
89336 struct sk_buff_head write_queue;
89337 struct sk_buff_head ack_queue;
89338 unsigned char window;
89339 - struct lapb_register_struct callbacks;
89340 + struct lapb_register_struct *callbacks;
89341
89342 /* FRMR control information */
89343 struct lapb_frame frmr_data;
89344 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
89345 index 3817fda..cdb2343 100644
89346 --- a/include/net/neighbour.h
89347 +++ b/include/net/neighbour.h
89348 @@ -131,7 +131,7 @@ struct neigh_ops
89349 int (*connected_output)(struct sk_buff*);
89350 int (*hh_output)(struct sk_buff*);
89351 int (*queue_xmit)(struct sk_buff*);
89352 -};
89353 +} __do_const;
89354
89355 struct pneigh_entry
89356 {
89357 diff --git a/include/net/netlink.h b/include/net/netlink.h
89358 index c344646..4778c71 100644
89359 --- a/include/net/netlink.h
89360 +++ b/include/net/netlink.h
89361 @@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
89362 {
89363 return (remaining >= (int) sizeof(struct nlmsghdr) &&
89364 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
89365 - nlh->nlmsg_len <= remaining);
89366 + nlh->nlmsg_len <= (unsigned int)remaining);
89367 }
89368
89369 /**
89370 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
89371 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
89372 {
89373 if (mark)
89374 - skb_trim(skb, (unsigned char *) mark - skb->data);
89375 + skb_trim(skb, (const unsigned char *) mark - skb->data);
89376 }
89377
89378 /**
89379 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
89380 index 9a4b8b7..e49e077 100644
89381 --- a/include/net/netns/ipv4.h
89382 +++ b/include/net/netns/ipv4.h
89383 @@ -54,7 +54,7 @@ struct netns_ipv4 {
89384 int current_rt_cache_rebuild_count;
89385
89386 struct timer_list rt_secret_timer;
89387 - atomic_t rt_genid;
89388 + atomic_unchecked_t rt_genid;
89389
89390 #ifdef CONFIG_IP_MROUTE
89391 struct sock *mroute_sk;
89392 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
89393 index 8a6d529..171f401 100644
89394 --- a/include/net/sctp/sctp.h
89395 +++ b/include/net/sctp/sctp.h
89396 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
89397
89398 #else /* SCTP_DEBUG */
89399
89400 -#define SCTP_DEBUG_PRINTK(whatever...)
89401 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
89402 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
89403 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
89404 #define SCTP_ENABLE_DEBUG
89405 #define SCTP_DISABLE_DEBUG
89406 #define SCTP_ASSERT(expr, str, func)
89407 diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
89408 index d97f689..f3b90ab 100644
89409 --- a/include/net/secure_seq.h
89410 +++ b/include/net/secure_seq.h
89411 @@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
89412 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
89413 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
89414 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
89415 - __be16 dport);
89416 + __be16 dport);
89417 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
89418 __be16 sport, __be16 dport);
89419 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
89420 - __be16 sport, __be16 dport);
89421 + __be16 sport, __be16 dport);
89422 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
89423 - __be16 sport, __be16 dport);
89424 + __be16 sport, __be16 dport);
89425 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
89426 - __be16 sport, __be16 dport);
89427 + __be16 sport, __be16 dport);
89428
89429 #endif /* _NET_SECURE_SEQ */
89430 diff --git a/include/net/sock.h b/include/net/sock.h
89431 index 78adf52..99afd29 100644
89432 --- a/include/net/sock.h
89433 +++ b/include/net/sock.h
89434 @@ -272,7 +272,7 @@ struct sock {
89435 rwlock_t sk_callback_lock;
89436 int sk_err,
89437 sk_err_soft;
89438 - atomic_t sk_drops;
89439 + atomic_unchecked_t sk_drops;
89440 unsigned short sk_ack_backlog;
89441 unsigned short sk_max_ack_backlog;
89442 __u32 sk_priority;
89443 @@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
89444 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
89445 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
89446 #else
89447 -static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
89448 +static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
89449 int inc)
89450 {
89451 }
89452 diff --git a/include/net/tcp.h b/include/net/tcp.h
89453 index 6cfe18b..dd21acb 100644
89454 --- a/include/net/tcp.h
89455 +++ b/include/net/tcp.h
89456 @@ -1444,8 +1444,8 @@ enum tcp_seq_states {
89457 struct tcp_seq_afinfo {
89458 char *name;
89459 sa_family_t family;
89460 - struct file_operations seq_fops;
89461 - struct seq_operations seq_ops;
89462 + file_operations_no_const seq_fops;
89463 + seq_operations_no_const seq_ops;
89464 };
89465
89466 struct tcp_iter_state {
89467 diff --git a/include/net/udp.h b/include/net/udp.h
89468 index f98abd2..b4b042f 100644
89469 --- a/include/net/udp.h
89470 +++ b/include/net/udp.h
89471 @@ -187,8 +187,8 @@ struct udp_seq_afinfo {
89472 char *name;
89473 sa_family_t family;
89474 struct udp_table *udp_table;
89475 - struct file_operations seq_fops;
89476 - struct seq_operations seq_ops;
89477 + file_operations_no_const seq_fops;
89478 + seq_operations_no_const seq_ops;
89479 };
89480
89481 struct udp_iter_state {
89482 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
89483 index cbb822e..e9c1cbe 100644
89484 --- a/include/rdma/iw_cm.h
89485 +++ b/include/rdma/iw_cm.h
89486 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
89487 int backlog);
89488
89489 int (*destroy_listen)(struct iw_cm_id *cm_id);
89490 -};
89491 +} __no_const;
89492
89493 /**
89494 * iw_create_cm_id - Create an IW CM identifier.
89495 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
89496 index 09a124b..caa8ca8 100644
89497 --- a/include/scsi/libfc.h
89498 +++ b/include/scsi/libfc.h
89499 @@ -675,6 +675,7 @@ struct libfc_function_template {
89500 */
89501 void (*disc_stop_final) (struct fc_lport *);
89502 };
89503 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
89504
89505 /* information used by the discovery layer */
89506 struct fc_disc {
89507 @@ -707,7 +708,7 @@ struct fc_lport {
89508 struct fc_disc disc;
89509
89510 /* Operational Information */
89511 - struct libfc_function_template tt;
89512 + libfc_function_template_no_const tt;
89513 u8 link_up;
89514 u8 qfull;
89515 enum fc_lport_state state;
89516 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
89517 index de8e180..f15e0d7 100644
89518 --- a/include/scsi/scsi_device.h
89519 +++ b/include/scsi/scsi_device.h
89520 @@ -156,9 +156,9 @@ struct scsi_device {
89521 unsigned int max_device_blocked; /* what device_blocked counts down from */
89522 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
89523
89524 - atomic_t iorequest_cnt;
89525 - atomic_t iodone_cnt;
89526 - atomic_t ioerr_cnt;
89527 + atomic_unchecked_t iorequest_cnt;
89528 + atomic_unchecked_t iodone_cnt;
89529 + atomic_unchecked_t ioerr_cnt;
89530
89531 struct device sdev_gendev,
89532 sdev_dev;
89533 diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
89534 index 0b4baba..0106e9e 100644
89535 --- a/include/scsi/scsi_host.h
89536 +++ b/include/scsi/scsi_host.h
89537 @@ -43,6 +43,12 @@ struct blk_queue_tags;
89538 #define DISABLE_CLUSTERING 0
89539 #define ENABLE_CLUSTERING 1
89540
89541 +enum {
89542 + SCSI_QDEPTH_DEFAULT, /* default requested change, e.g. from sysfs */
89543 + SCSI_QDEPTH_QFULL, /* scsi-ml requested due to queue full */
89544 + SCSI_QDEPTH_RAMP_UP, /* scsi-ml requested due to threshhold event */
89545 +};
89546 +
89547 struct scsi_host_template {
89548 struct module *module;
89549 const char *name;
89550 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
89551 index fc50bd6..81ba9cb 100644
89552 --- a/include/scsi/scsi_transport_fc.h
89553 +++ b/include/scsi/scsi_transport_fc.h
89554 @@ -708,7 +708,7 @@ struct fc_function_template {
89555 unsigned long show_host_system_hostname:1;
89556
89557 unsigned long disable_target_scan:1;
89558 -};
89559 +} __do_const;
89560
89561
89562 /**
89563 diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
89564 index 3dae3f7..8440d6f 100644
89565 --- a/include/sound/ac97_codec.h
89566 +++ b/include/sound/ac97_codec.h
89567 @@ -419,15 +419,15 @@
89568 struct snd_ac97;
89569
89570 struct snd_ac97_build_ops {
89571 - int (*build_3d) (struct snd_ac97 *ac97);
89572 - int (*build_specific) (struct snd_ac97 *ac97);
89573 - int (*build_spdif) (struct snd_ac97 *ac97);
89574 - int (*build_post_spdif) (struct snd_ac97 *ac97);
89575 + int (* const build_3d) (struct snd_ac97 *ac97);
89576 + int (* const build_specific) (struct snd_ac97 *ac97);
89577 + int (* const build_spdif) (struct snd_ac97 *ac97);
89578 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
89579 #ifdef CONFIG_PM
89580 - void (*suspend) (struct snd_ac97 *ac97);
89581 - void (*resume) (struct snd_ac97 *ac97);
89582 + void (* const suspend) (struct snd_ac97 *ac97);
89583 + void (* const resume) (struct snd_ac97 *ac97);
89584 #endif
89585 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
89586 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
89587 };
89588
89589 struct snd_ac97_bus_ops {
89590 @@ -477,7 +477,7 @@ struct snd_ac97_template {
89591
89592 struct snd_ac97 {
89593 /* -- lowlevel (hardware) driver specific -- */
89594 - struct snd_ac97_build_ops * build_ops;
89595 + const struct snd_ac97_build_ops * build_ops;
89596 void *private_data;
89597 void (*private_free) (struct snd_ac97 *ac97);
89598 /* --- */
89599 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
89600 index 891cf1a..a94ba2b 100644
89601 --- a/include/sound/ak4xxx-adda.h
89602 +++ b/include/sound/ak4xxx-adda.h
89603 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
89604 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
89605 unsigned char val);
89606 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
89607 -};
89608 +} __no_const;
89609
89610 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
89611
89612 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
89613 index 8c05e47..2b5df97 100644
89614 --- a/include/sound/hwdep.h
89615 +++ b/include/sound/hwdep.h
89616 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
89617 struct snd_hwdep_dsp_status *status);
89618 int (*dsp_load)(struct snd_hwdep *hw,
89619 struct snd_hwdep_dsp_image *image);
89620 -};
89621 +} __no_const;
89622
89623 struct snd_hwdep {
89624 struct snd_card *card;
89625 diff --git a/include/sound/info.h b/include/sound/info.h
89626 index 112e894..6fda5b5 100644
89627 --- a/include/sound/info.h
89628 +++ b/include/sound/info.h
89629 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
89630 struct snd_info_buffer *buffer);
89631 void (*write)(struct snd_info_entry *entry,
89632 struct snd_info_buffer *buffer);
89633 -};
89634 +} __no_const;
89635
89636 struct snd_info_entry_ops {
89637 int (*open)(struct snd_info_entry *entry,
89638 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
89639 index de6d981..590a550 100644
89640 --- a/include/sound/pcm.h
89641 +++ b/include/sound/pcm.h
89642 @@ -80,6 +80,7 @@ struct snd_pcm_ops {
89643 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
89644 int (*ack)(struct snd_pcm_substream *substream);
89645 };
89646 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
89647
89648 /*
89649 *
89650 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
89651 index 736eac7..fe8a80f 100644
89652 --- a/include/sound/sb16_csp.h
89653 +++ b/include/sound/sb16_csp.h
89654 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
89655 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
89656 int (*csp_stop) (struct snd_sb_csp * p);
89657 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
89658 -};
89659 +} __no_const;
89660
89661 /*
89662 * CSP private data
89663 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
89664 index 444cd6b..3327cc5 100644
89665 --- a/include/sound/ymfpci.h
89666 +++ b/include/sound/ymfpci.h
89667 @@ -358,7 +358,7 @@ struct snd_ymfpci {
89668 spinlock_t reg_lock;
89669 spinlock_t voice_lock;
89670 wait_queue_head_t interrupt_sleep;
89671 - atomic_t interrupt_sleep_count;
89672 + atomic_unchecked_t interrupt_sleep_count;
89673 struct snd_info_entry *proc_entry;
89674 const struct firmware *dsp_microcode;
89675 const struct firmware *controller_microcode;
89676 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
89677 index b89f9db..f097b38 100644
89678 --- a/include/trace/events/irq.h
89679 +++ b/include/trace/events/irq.h
89680 @@ -34,7 +34,7 @@
89681 */
89682 TRACE_EVENT(irq_handler_entry,
89683
89684 - TP_PROTO(int irq, struct irqaction *action),
89685 + TP_PROTO(int irq, const struct irqaction *action),
89686
89687 TP_ARGS(irq, action),
89688
89689 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
89690 */
89691 TRACE_EVENT(irq_handler_exit,
89692
89693 - TP_PROTO(int irq, struct irqaction *action, int ret),
89694 + TP_PROTO(int irq, const struct irqaction *action, int ret),
89695
89696 TP_ARGS(irq, action, ret),
89697
89698 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
89699 */
89700 TRACE_EVENT(softirq_entry,
89701
89702 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
89703 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
89704
89705 TP_ARGS(h, vec),
89706
89707 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
89708 */
89709 TRACE_EVENT(softirq_exit,
89710
89711 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
89712 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
89713
89714 TP_ARGS(h, vec),
89715
89716 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
89717 index 0993a22..32ba2fe 100644
89718 --- a/include/video/uvesafb.h
89719 +++ b/include/video/uvesafb.h
89720 @@ -177,6 +177,7 @@ struct uvesafb_par {
89721 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
89722 u8 pmi_setpal; /* PMI for palette changes */
89723 u16 *pmi_base; /* protected mode interface location */
89724 + u8 *pmi_code; /* protected mode code location */
89725 void *pmi_start;
89726 void *pmi_pal;
89727 u8 *vbe_state_orig; /*
89728 diff --git a/init/Kconfig b/init/Kconfig
89729 index d72691b..3996e54 100644
89730 --- a/init/Kconfig
89731 +++ b/init/Kconfig
89732 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
89733
89734 config COMPAT_BRK
89735 bool "Disable heap randomization"
89736 - default y
89737 + default n
89738 help
89739 Randomizing heap placement makes heap exploits harder, but it
89740 also breaks ancient binaries (including anything libc5 based).
89741 diff --git a/init/do_mounts.c b/init/do_mounts.c
89742 index bb008d0..4fa3933 100644
89743 --- a/init/do_mounts.c
89744 +++ b/init/do_mounts.c
89745 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
89746
89747 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
89748 {
89749 - int err = sys_mount(name, "/root", fs, flags, data);
89750 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
89751 if (err)
89752 return err;
89753
89754 - sys_chdir("/root");
89755 + sys_chdir((__force const char __user *)"/root");
89756 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
89757 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
89758 current->fs->pwd.mnt->mnt_sb->s_type->name,
89759 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
89760 va_start(args, fmt);
89761 vsprintf(buf, fmt, args);
89762 va_end(args);
89763 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
89764 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
89765 if (fd >= 0) {
89766 sys_ioctl(fd, FDEJECT, 0);
89767 sys_close(fd);
89768 }
89769 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
89770 - fd = sys_open("/dev/console", O_RDWR, 0);
89771 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
89772 if (fd >= 0) {
89773 sys_ioctl(fd, TCGETS, (long)&termios);
89774 termios.c_lflag &= ~ICANON;
89775 sys_ioctl(fd, TCSETSF, (long)&termios);
89776 - sys_read(fd, &c, 1);
89777 + sys_read(fd, (char __user *)&c, 1);
89778 termios.c_lflag |= ICANON;
89779 sys_ioctl(fd, TCSETSF, (long)&termios);
89780 sys_close(fd);
89781 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
89782 mount_root();
89783 out:
89784 devtmpfs_mount("dev");
89785 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
89786 - sys_chroot(".");
89787 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
89788 + sys_chroot((__force char __user *)".");
89789 }
89790 diff --git a/init/do_mounts.h b/init/do_mounts.h
89791 index f5b978a..69dbfe8 100644
89792 --- a/init/do_mounts.h
89793 +++ b/init/do_mounts.h
89794 @@ -15,15 +15,15 @@ extern int root_mountflags;
89795
89796 static inline int create_dev(char *name, dev_t dev)
89797 {
89798 - sys_unlink(name);
89799 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
89800 + sys_unlink((char __force_user *)name);
89801 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
89802 }
89803
89804 #if BITS_PER_LONG == 32
89805 static inline u32 bstat(char *name)
89806 {
89807 struct stat64 stat;
89808 - if (sys_stat64(name, &stat) != 0)
89809 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
89810 return 0;
89811 if (!S_ISBLK(stat.st_mode))
89812 return 0;
89813 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
89814 static inline u32 bstat(char *name)
89815 {
89816 struct stat stat;
89817 - if (sys_newstat(name, &stat) != 0)
89818 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
89819 return 0;
89820 if (!S_ISBLK(stat.st_mode))
89821 return 0;
89822 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
89823 index 614241b..4da046b 100644
89824 --- a/init/do_mounts_initrd.c
89825 +++ b/init/do_mounts_initrd.c
89826 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
89827 sys_close(old_fd);sys_close(root_fd);
89828 sys_close(0);sys_close(1);sys_close(2);
89829 sys_setsid();
89830 - (void) sys_open("/dev/console",O_RDWR,0);
89831 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
89832 (void) sys_dup(0);
89833 (void) sys_dup(0);
89834 return kernel_execve(shell, argv, envp_init);
89835 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
89836 create_dev("/dev/root.old", Root_RAM0);
89837 /* mount initrd on rootfs' /root */
89838 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
89839 - sys_mkdir("/old", 0700);
89840 - root_fd = sys_open("/", 0, 0);
89841 - old_fd = sys_open("/old", 0, 0);
89842 + sys_mkdir((const char __force_user *)"/old", 0700);
89843 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
89844 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
89845 /* move initrd over / and chdir/chroot in initrd root */
89846 - sys_chdir("/root");
89847 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
89848 - sys_chroot(".");
89849 + sys_chdir((const char __force_user *)"/root");
89850 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
89851 + sys_chroot((const char __force_user *)".");
89852
89853 /*
89854 * In case that a resume from disk is carried out by linuxrc or one of
89855 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
89856
89857 /* move initrd to rootfs' /old */
89858 sys_fchdir(old_fd);
89859 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
89860 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
89861 /* switch root and cwd back to / of rootfs */
89862 sys_fchdir(root_fd);
89863 - sys_chroot(".");
89864 + sys_chroot((const char __force_user *)".");
89865 sys_close(old_fd);
89866 sys_close(root_fd);
89867
89868 if (new_decode_dev(real_root_dev) == Root_RAM0) {
89869 - sys_chdir("/old");
89870 + sys_chdir((const char __force_user *)"/old");
89871 return;
89872 }
89873
89874 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
89875 mount_root();
89876
89877 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
89878 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
89879 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
89880 if (!error)
89881 printk("okay\n");
89882 else {
89883 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
89884 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
89885 if (error == -ENOENT)
89886 printk("/initrd does not exist. Ignored.\n");
89887 else
89888 printk("failed\n");
89889 printk(KERN_NOTICE "Unmounting old root\n");
89890 - sys_umount("/old", MNT_DETACH);
89891 + sys_umount((char __force_user *)"/old", MNT_DETACH);
89892 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
89893 if (fd < 0) {
89894 error = fd;
89895 @@ -119,11 +119,11 @@ int __init initrd_load(void)
89896 * mounted in the normal path.
89897 */
89898 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
89899 - sys_unlink("/initrd.image");
89900 + sys_unlink((const char __force_user *)"/initrd.image");
89901 handle_initrd();
89902 return 1;
89903 }
89904 }
89905 - sys_unlink("/initrd.image");
89906 + sys_unlink((const char __force_user *)"/initrd.image");
89907 return 0;
89908 }
89909 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
89910 index 69aebbf..c0bf6a7 100644
89911 --- a/init/do_mounts_md.c
89912 +++ b/init/do_mounts_md.c
89913 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
89914 partitioned ? "_d" : "", minor,
89915 md_setup_args[ent].device_names);
89916
89917 - fd = sys_open(name, 0, 0);
89918 + fd = sys_open((char __force_user *)name, 0, 0);
89919 if (fd < 0) {
89920 printk(KERN_ERR "md: open failed - cannot start "
89921 "array %s\n", name);
89922 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
89923 * array without it
89924 */
89925 sys_close(fd);
89926 - fd = sys_open(name, 0, 0);
89927 + fd = sys_open((char __force_user *)name, 0, 0);
89928 sys_ioctl(fd, BLKRRPART, 0);
89929 }
89930 sys_close(fd);
89931 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
89932
89933 wait_for_device_probe();
89934
89935 - fd = sys_open("/dev/md0", 0, 0);
89936 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
89937 if (fd >= 0) {
89938 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
89939 sys_close(fd);
89940 diff --git a/init/initramfs.c b/init/initramfs.c
89941 index 1fd59b8..a01b079 100644
89942 --- a/init/initramfs.c
89943 +++ b/init/initramfs.c
89944 @@ -74,7 +74,7 @@ static void __init free_hash(void)
89945 }
89946 }
89947
89948 -static long __init do_utime(char __user *filename, time_t mtime)
89949 +static long __init do_utime(__force char __user *filename, time_t mtime)
89950 {
89951 struct timespec t[2];
89952
89953 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
89954 struct dir_entry *de, *tmp;
89955 list_for_each_entry_safe(de, tmp, &dir_list, list) {
89956 list_del(&de->list);
89957 - do_utime(de->name, de->mtime);
89958 + do_utime((char __force_user *)de->name, de->mtime);
89959 kfree(de->name);
89960 kfree(de);
89961 }
89962 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
89963 if (nlink >= 2) {
89964 char *old = find_link(major, minor, ino, mode, collected);
89965 if (old)
89966 - return (sys_link(old, collected) < 0) ? -1 : 1;
89967 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
89968 }
89969 return 0;
89970 }
89971 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
89972 {
89973 struct stat st;
89974
89975 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
89976 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
89977 if (S_ISDIR(st.st_mode))
89978 - sys_rmdir(path);
89979 + sys_rmdir((char __force_user *)path);
89980 else
89981 - sys_unlink(path);
89982 + sys_unlink((char __force_user *)path);
89983 }
89984 }
89985
89986 @@ -305,7 +305,7 @@ static int __init do_name(void)
89987 int openflags = O_WRONLY|O_CREAT;
89988 if (ml != 1)
89989 openflags |= O_TRUNC;
89990 - wfd = sys_open(collected, openflags, mode);
89991 + wfd = sys_open((char __force_user *)collected, openflags, mode);
89992
89993 if (wfd >= 0) {
89994 sys_fchown(wfd, uid, gid);
89995 @@ -317,17 +317,17 @@ static int __init do_name(void)
89996 }
89997 }
89998 } else if (S_ISDIR(mode)) {
89999 - sys_mkdir(collected, mode);
90000 - sys_chown(collected, uid, gid);
90001 - sys_chmod(collected, mode);
90002 + sys_mkdir((char __force_user *)collected, mode);
90003 + sys_chown((char __force_user *)collected, uid, gid);
90004 + sys_chmod((char __force_user *)collected, mode);
90005 dir_add(collected, mtime);
90006 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
90007 S_ISFIFO(mode) || S_ISSOCK(mode)) {
90008 if (maybe_link() == 0) {
90009 - sys_mknod(collected, mode, rdev);
90010 - sys_chown(collected, uid, gid);
90011 - sys_chmod(collected, mode);
90012 - do_utime(collected, mtime);
90013 + sys_mknod((char __force_user *)collected, mode, rdev);
90014 + sys_chown((char __force_user *)collected, uid, gid);
90015 + sys_chmod((char __force_user *)collected, mode);
90016 + do_utime((char __force_user *)collected, mtime);
90017 }
90018 }
90019 return 0;
90020 @@ -336,15 +336,15 @@ static int __init do_name(void)
90021 static int __init do_copy(void)
90022 {
90023 if (count >= body_len) {
90024 - sys_write(wfd, victim, body_len);
90025 + sys_write(wfd, (char __force_user *)victim, body_len);
90026 sys_close(wfd);
90027 - do_utime(vcollected, mtime);
90028 + do_utime((char __force_user *)vcollected, mtime);
90029 kfree(vcollected);
90030 eat(body_len);
90031 state = SkipIt;
90032 return 0;
90033 } else {
90034 - sys_write(wfd, victim, count);
90035 + sys_write(wfd, (char __force_user *)victim, count);
90036 body_len -= count;
90037 eat(count);
90038 return 1;
90039 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
90040 {
90041 collected[N_ALIGN(name_len) + body_len] = '\0';
90042 clean_path(collected, 0);
90043 - sys_symlink(collected + N_ALIGN(name_len), collected);
90044 - sys_lchown(collected, uid, gid);
90045 - do_utime(collected, mtime);
90046 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
90047 + sys_lchown((char __force_user *)collected, uid, gid);
90048 + do_utime((char __force_user *)collected, mtime);
90049 state = SkipIt;
90050 next_state = Reset;
90051 return 0;
90052 diff --git a/init/main.c b/init/main.c
90053 index 1eb4bd5..fea5bbe 100644
90054 --- a/init/main.c
90055 +++ b/init/main.c
90056 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
90057 #ifdef CONFIG_TC
90058 extern void tc_init(void);
90059 #endif
90060 +extern void grsecurity_init(void);
90061
90062 enum system_states system_state __read_mostly;
90063 EXPORT_SYMBOL(system_state);
90064 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
90065
90066 __setup("reset_devices", set_reset_devices);
90067
90068 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
90069 +extern char pax_enter_kernel_user[];
90070 +extern char pax_exit_kernel_user[];
90071 +extern pgdval_t clone_pgd_mask;
90072 +#endif
90073 +
90074 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
90075 +static int __init setup_pax_nouderef(char *str)
90076 +{
90077 +#ifdef CONFIG_X86_32
90078 + unsigned int cpu;
90079 + struct desc_struct *gdt;
90080 +
90081 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
90082 + gdt = get_cpu_gdt_table(cpu);
90083 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
90084 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
90085 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
90086 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
90087 + }
90088 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
90089 +#else
90090 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
90091 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
90092 + clone_pgd_mask = ~(pgdval_t)0UL;
90093 +#endif
90094 +
90095 + return 0;
90096 +}
90097 +early_param("pax_nouderef", setup_pax_nouderef);
90098 +#endif
90099 +
90100 +#ifdef CONFIG_PAX_SOFTMODE
90101 +int pax_softmode;
90102 +
90103 +static int __init setup_pax_softmode(char *str)
90104 +{
90105 + get_option(&str, &pax_softmode);
90106 + return 1;
90107 +}
90108 +__setup("pax_softmode=", setup_pax_softmode);
90109 +#endif
90110 +
90111 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
90112 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
90113 static const char *panic_later, *panic_param;
90114 @@ -705,52 +749,53 @@ int initcall_debug;
90115 core_param(initcall_debug, initcall_debug, bool, 0644);
90116
90117 static char msgbuf[64];
90118 -static struct boot_trace_call call;
90119 -static struct boot_trace_ret ret;
90120 +static struct boot_trace_call trace_call;
90121 +static struct boot_trace_ret trace_ret;
90122
90123 int do_one_initcall(initcall_t fn)
90124 {
90125 int count = preempt_count();
90126 ktime_t calltime, delta, rettime;
90127 + const char *msg1 = "", *msg2 = "";
90128
90129 if (initcall_debug) {
90130 - call.caller = task_pid_nr(current);
90131 - printk("calling %pF @ %i\n", fn, call.caller);
90132 + trace_call.caller = task_pid_nr(current);
90133 + printk("calling %pF @ %i\n", fn, trace_call.caller);
90134 calltime = ktime_get();
90135 - trace_boot_call(&call, fn);
90136 + trace_boot_call(&trace_call, fn);
90137 enable_boot_trace();
90138 }
90139
90140 - ret.result = fn();
90141 + trace_ret.result = fn();
90142
90143 if (initcall_debug) {
90144 disable_boot_trace();
90145 rettime = ktime_get();
90146 delta = ktime_sub(rettime, calltime);
90147 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
90148 - trace_boot_ret(&ret, fn);
90149 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
90150 + trace_boot_ret(&trace_ret, fn);
90151 printk("initcall %pF returned %d after %Ld usecs\n", fn,
90152 - ret.result, ret.duration);
90153 + trace_ret.result, trace_ret.duration);
90154 }
90155
90156 msgbuf[0] = 0;
90157
90158 - if (ret.result && ret.result != -ENODEV && initcall_debug)
90159 - sprintf(msgbuf, "error code %d ", ret.result);
90160 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
90161 + sprintf(msgbuf, "error code %d ", trace_ret.result);
90162
90163 if (preempt_count() != count) {
90164 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
90165 + msg1 = " preemption imbalance";
90166 preempt_count() = count;
90167 }
90168 if (irqs_disabled()) {
90169 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
90170 + msg2 = " disabled interrupts";
90171 local_irq_enable();
90172 }
90173 - if (msgbuf[0]) {
90174 - printk("initcall %pF returned with %s\n", fn, msgbuf);
90175 + if (msgbuf[0] || *msg1 || *msg2) {
90176 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
90177 }
90178
90179 - return ret.result;
90180 + return trace_ret.result;
90181 }
90182
90183
90184 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
90185 if (!ramdisk_execute_command)
90186 ramdisk_execute_command = "/init";
90187
90188 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
90189 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
90190 ramdisk_execute_command = NULL;
90191 prepare_namespace();
90192 }
90193
90194 + grsecurity_init();
90195 +
90196 /*
90197 * Ok, we have completed the initial bootup, and
90198 * we're essentially up and running. Get rid of the
90199 diff --git a/init/noinitramfs.c b/init/noinitramfs.c
90200 index f4c1a3a..96c19bd 100644
90201 --- a/init/noinitramfs.c
90202 +++ b/init/noinitramfs.c
90203 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
90204 {
90205 int err;
90206
90207 - err = sys_mkdir("/dev", 0755);
90208 + err = sys_mkdir((const char __user *)"/dev", 0755);
90209 if (err < 0)
90210 goto out;
90211
90212 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
90213 if (err < 0)
90214 goto out;
90215
90216 - err = sys_mkdir("/root", 0700);
90217 + err = sys_mkdir((const char __user *)"/root", 0700);
90218 if (err < 0)
90219 goto out;
90220
90221 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
90222 index d01bc14..8df81db 100644
90223 --- a/ipc/mqueue.c
90224 +++ b/ipc/mqueue.c
90225 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
90226 mq_bytes = (mq_msg_tblsz +
90227 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
90228
90229 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
90230 spin_lock(&mq_lock);
90231 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
90232 u->mq_bytes + mq_bytes >
90233 diff --git a/ipc/msg.c b/ipc/msg.c
90234 index 779f762..4af9e36 100644
90235 --- a/ipc/msg.c
90236 +++ b/ipc/msg.c
90237 @@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
90238 return security_msg_queue_associate(msq, msgflg);
90239 }
90240
90241 +static struct ipc_ops msg_ops = {
90242 + .getnew = newque,
90243 + .associate = msg_security,
90244 + .more_checks = NULL
90245 +};
90246 +
90247 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
90248 {
90249 struct ipc_namespace *ns;
90250 - struct ipc_ops msg_ops;
90251 struct ipc_params msg_params;
90252
90253 ns = current->nsproxy->ipc_ns;
90254
90255 - msg_ops.getnew = newque;
90256 - msg_ops.associate = msg_security;
90257 - msg_ops.more_checks = NULL;
90258 -
90259 msg_params.key = key;
90260 msg_params.flg = msgflg;
90261
90262 diff --git a/ipc/sem.c b/ipc/sem.c
90263 index b781007..f738b04 100644
90264 --- a/ipc/sem.c
90265 +++ b/ipc/sem.c
90266 @@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
90267 return 0;
90268 }
90269
90270 +static struct ipc_ops sem_ops = {
90271 + .getnew = newary,
90272 + .associate = sem_security,
90273 + .more_checks = sem_more_checks
90274 +};
90275 +
90276 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
90277 {
90278 struct ipc_namespace *ns;
90279 - struct ipc_ops sem_ops;
90280 struct ipc_params sem_params;
90281
90282 ns = current->nsproxy->ipc_ns;
90283 @@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
90284 if (nsems < 0 || nsems > ns->sc_semmsl)
90285 return -EINVAL;
90286
90287 - sem_ops.getnew = newary;
90288 - sem_ops.associate = sem_security;
90289 - sem_ops.more_checks = sem_more_checks;
90290 -
90291 sem_params.key = key;
90292 sem_params.flg = semflg;
90293 sem_params.u.nsems = nsems;
90294 @@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
90295 ushort* sem_io = fast_sem_io;
90296 int nsems;
90297
90298 + pax_track_stack();
90299 +
90300 sma = sem_lock_check(ns, semid);
90301 if (IS_ERR(sma))
90302 return PTR_ERR(sma);
90303 @@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
90304 unsigned long jiffies_left = 0;
90305 struct ipc_namespace *ns;
90306
90307 + pax_track_stack();
90308 +
90309 ns = current->nsproxy->ipc_ns;
90310
90311 if (nsops < 1 || semid < 0)
90312 diff --git a/ipc/shm.c b/ipc/shm.c
90313 index d30732c..e4992cd 100644
90314 --- a/ipc/shm.c
90315 +++ b/ipc/shm.c
90316 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
90317 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
90318 #endif
90319
90320 +#ifdef CONFIG_GRKERNSEC
90321 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
90322 + const time_t shm_createtime, const uid_t cuid,
90323 + const int shmid);
90324 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
90325 + const time_t shm_createtime);
90326 +#endif
90327 +
90328 void shm_init_ns(struct ipc_namespace *ns)
90329 {
90330 ns->shm_ctlmax = SHMMAX;
90331 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
90332 shp->shm_lprid = 0;
90333 shp->shm_atim = shp->shm_dtim = 0;
90334 shp->shm_ctim = get_seconds();
90335 +#ifdef CONFIG_GRKERNSEC
90336 + {
90337 + struct timespec timeval;
90338 + do_posix_clock_monotonic_gettime(&timeval);
90339 +
90340 + shp->shm_createtime = timeval.tv_sec;
90341 + }
90342 +#endif
90343 shp->shm_segsz = size;
90344 shp->shm_nattch = 0;
90345 shp->shm_file = file;
90346 @@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
90347 return 0;
90348 }
90349
90350 +static struct ipc_ops shm_ops = {
90351 + .getnew = newseg,
90352 + .associate = shm_security,
90353 + .more_checks = shm_more_checks
90354 +};
90355 +
90356 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
90357 {
90358 struct ipc_namespace *ns;
90359 - struct ipc_ops shm_ops;
90360 struct ipc_params shm_params;
90361
90362 ns = current->nsproxy->ipc_ns;
90363
90364 - shm_ops.getnew = newseg;
90365 - shm_ops.associate = shm_security;
90366 - shm_ops.more_checks = shm_more_checks;
90367 -
90368 shm_params.key = key;
90369 shm_params.flg = shmflg;
90370 shm_params.u.size = size;
90371 @@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
90372 f_mode = FMODE_READ | FMODE_WRITE;
90373 }
90374 if (shmflg & SHM_EXEC) {
90375 +
90376 +#ifdef CONFIG_PAX_MPROTECT
90377 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
90378 + goto out;
90379 +#endif
90380 +
90381 prot |= PROT_EXEC;
90382 acc_mode |= S_IXUGO;
90383 }
90384 @@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
90385 if (err)
90386 goto out_unlock;
90387
90388 +#ifdef CONFIG_GRKERNSEC
90389 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
90390 + shp->shm_perm.cuid, shmid) ||
90391 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
90392 + err = -EACCES;
90393 + goto out_unlock;
90394 + }
90395 +#endif
90396 +
90397 path.dentry = dget(shp->shm_file->f_path.dentry);
90398 path.mnt = shp->shm_file->f_path.mnt;
90399 shp->shm_nattch++;
90400 +#ifdef CONFIG_GRKERNSEC
90401 + shp->shm_lapid = current->pid;
90402 +#endif
90403 size = i_size_read(path.dentry->d_inode);
90404 shm_unlock(shp);
90405
90406 diff --git a/kernel/acct.c b/kernel/acct.c
90407 index a6605ca..ca91111 100644
90408 --- a/kernel/acct.c
90409 +++ b/kernel/acct.c
90410 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
90411 */
90412 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
90413 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
90414 - file->f_op->write(file, (char *)&ac,
90415 + file->f_op->write(file, (char __force_user *)&ac,
90416 sizeof(acct_t), &file->f_pos);
90417 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
90418 set_fs(fs);
90419 diff --git a/kernel/audit.c b/kernel/audit.c
90420 index 5feed23..48415fd 100644
90421 --- a/kernel/audit.c
90422 +++ b/kernel/audit.c
90423 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
90424 3) suppressed due to audit_rate_limit
90425 4) suppressed due to audit_backlog_limit
90426 */
90427 -static atomic_t audit_lost = ATOMIC_INIT(0);
90428 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
90429
90430 /* The netlink socket. */
90431 static struct sock *audit_sock;
90432 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
90433 unsigned long now;
90434 int print;
90435
90436 - atomic_inc(&audit_lost);
90437 + atomic_inc_unchecked(&audit_lost);
90438
90439 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
90440
90441 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
90442 printk(KERN_WARNING
90443 "audit: audit_lost=%d audit_rate_limit=%d "
90444 "audit_backlog_limit=%d\n",
90445 - atomic_read(&audit_lost),
90446 + atomic_read_unchecked(&audit_lost),
90447 audit_rate_limit,
90448 audit_backlog_limit);
90449 audit_panic(message);
90450 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
90451 status_set.pid = audit_pid;
90452 status_set.rate_limit = audit_rate_limit;
90453 status_set.backlog_limit = audit_backlog_limit;
90454 - status_set.lost = atomic_read(&audit_lost);
90455 + status_set.lost = atomic_read_unchecked(&audit_lost);
90456 status_set.backlog = skb_queue_len(&audit_skb_queue);
90457 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
90458 &status_set, sizeof(status_set));
90459 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
90460 spin_unlock_irq(&tsk->sighand->siglock);
90461 }
90462 read_unlock(&tasklist_lock);
90463 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
90464 - &s, sizeof(s));
90465 +
90466 + if (!err)
90467 + audit_send_reply(NETLINK_CB(skb).pid, seq,
90468 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
90469 break;
90470 }
90471 case AUDIT_TTY_SET: {
90472 @@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
90473 avail = audit_expand(ab,
90474 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
90475 if (!avail)
90476 - goto out;
90477 + goto out_va_end;
90478 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
90479 }
90480 - va_end(args2);
90481 if (len > 0)
90482 skb_put(skb, len);
90483 +out_va_end:
90484 + va_end(args2);
90485 out:
90486 return;
90487 }
90488 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
90489 index 267e484..ac41bc3 100644
90490 --- a/kernel/auditsc.c
90491 +++ b/kernel/auditsc.c
90492 @@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
90493 struct audit_buffer **ab,
90494 struct audit_aux_data_execve *axi)
90495 {
90496 - int i;
90497 - size_t len, len_sent = 0;
90498 + int i, len;
90499 + size_t len_sent = 0;
90500 const char __user *p;
90501 char *buf;
90502
90503 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
90504 }
90505
90506 /* global counter which is incremented every time something logs in */
90507 -static atomic_t session_id = ATOMIC_INIT(0);
90508 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
90509
90510 /**
90511 * audit_set_loginuid - set a task's audit_context loginuid
90512 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
90513 */
90514 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
90515 {
90516 - unsigned int sessionid = atomic_inc_return(&session_id);
90517 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
90518 struct audit_context *context = task->audit_context;
90519
90520 if (context && context->in_syscall) {
90521 diff --git a/kernel/capability.c b/kernel/capability.c
90522 index 8a944f5..db5001e 100644
90523 --- a/kernel/capability.c
90524 +++ b/kernel/capability.c
90525 @@ -305,10 +305,26 @@ int capable(int cap)
90526 BUG();
90527 }
90528
90529 - if (security_capable(cap) == 0) {
90530 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
90531 current->flags |= PF_SUPERPRIV;
90532 return 1;
90533 }
90534 return 0;
90535 }
90536 +
90537 +int capable_nolog(int cap)
90538 +{
90539 + if (unlikely(!cap_valid(cap))) {
90540 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
90541 + BUG();
90542 + }
90543 +
90544 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
90545 + current->flags |= PF_SUPERPRIV;
90546 + return 1;
90547 + }
90548 + return 0;
90549 +}
90550 +
90551 EXPORT_SYMBOL(capable);
90552 +EXPORT_SYMBOL(capable_nolog);
90553 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
90554 index 1fbcc74..7000012 100644
90555 --- a/kernel/cgroup.c
90556 +++ b/kernel/cgroup.c
90557 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
90558 struct hlist_head *hhead;
90559 struct cg_cgroup_link *link;
90560
90561 + pax_track_stack();
90562 +
90563 /* First see if we already have a cgroup group that matches
90564 * the desired set */
90565 read_lock(&css_set_lock);
90566 diff --git a/kernel/compat.c b/kernel/compat.c
90567 index 8bc5578..186e44a 100644
90568 --- a/kernel/compat.c
90569 +++ b/kernel/compat.c
90570 @@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
90571 mm_segment_t oldfs;
90572 long ret;
90573
90574 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
90575 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
90576 oldfs = get_fs();
90577 set_fs(KERNEL_DS);
90578 ret = hrtimer_nanosleep_restart(restart);
90579 @@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
90580 oldfs = get_fs();
90581 set_fs(KERNEL_DS);
90582 ret = hrtimer_nanosleep(&tu,
90583 - rmtp ? (struct timespec __user *)&rmt : NULL,
90584 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
90585 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
90586 set_fs(oldfs);
90587
90588 @@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
90589 mm_segment_t old_fs = get_fs();
90590
90591 set_fs(KERNEL_DS);
90592 - ret = sys_sigpending((old_sigset_t __user *) &s);
90593 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
90594 set_fs(old_fs);
90595 if (ret == 0)
90596 ret = put_user(s, set);
90597 @@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
90598 old_fs = get_fs();
90599 set_fs(KERNEL_DS);
90600 ret = sys_sigprocmask(how,
90601 - set ? (old_sigset_t __user *) &s : NULL,
90602 - oset ? (old_sigset_t __user *) &s : NULL);
90603 + set ? (old_sigset_t __force_user *) &s : NULL,
90604 + oset ? (old_sigset_t __force_user *) &s : NULL);
90605 set_fs(old_fs);
90606 if (ret == 0)
90607 if (oset)
90608 @@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
90609 mm_segment_t old_fs = get_fs();
90610
90611 set_fs(KERNEL_DS);
90612 - ret = sys_old_getrlimit(resource, &r);
90613 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
90614 set_fs(old_fs);
90615
90616 if (!ret) {
90617 @@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
90618 mm_segment_t old_fs = get_fs();
90619
90620 set_fs(KERNEL_DS);
90621 - ret = sys_getrusage(who, (struct rusage __user *) &r);
90622 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
90623 set_fs(old_fs);
90624
90625 if (ret)
90626 @@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
90627 set_fs (KERNEL_DS);
90628 ret = sys_wait4(pid,
90629 (stat_addr ?
90630 - (unsigned int __user *) &status : NULL),
90631 - options, (struct rusage __user *) &r);
90632 + (unsigned int __force_user *) &status : NULL),
90633 + options, (struct rusage __force_user *) &r);
90634 set_fs (old_fs);
90635
90636 if (ret > 0) {
90637 @@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
90638 memset(&info, 0, sizeof(info));
90639
90640 set_fs(KERNEL_DS);
90641 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
90642 - uru ? (struct rusage __user *)&ru : NULL);
90643 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
90644 + uru ? (struct rusage __force_user *)&ru : NULL);
90645 set_fs(old_fs);
90646
90647 if ((ret < 0) || (info.si_signo == 0))
90648 @@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
90649 oldfs = get_fs();
90650 set_fs(KERNEL_DS);
90651 err = sys_timer_settime(timer_id, flags,
90652 - (struct itimerspec __user *) &newts,
90653 - (struct itimerspec __user *) &oldts);
90654 + (struct itimerspec __force_user *) &newts,
90655 + (struct itimerspec __force_user *) &oldts);
90656 set_fs(oldfs);
90657 if (!err && old && put_compat_itimerspec(old, &oldts))
90658 return -EFAULT;
90659 @@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
90660 oldfs = get_fs();
90661 set_fs(KERNEL_DS);
90662 err = sys_timer_gettime(timer_id,
90663 - (struct itimerspec __user *) &ts);
90664 + (struct itimerspec __force_user *) &ts);
90665 set_fs(oldfs);
90666 if (!err && put_compat_itimerspec(setting, &ts))
90667 return -EFAULT;
90668 @@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
90669 oldfs = get_fs();
90670 set_fs(KERNEL_DS);
90671 err = sys_clock_settime(which_clock,
90672 - (struct timespec __user *) &ts);
90673 + (struct timespec __force_user *) &ts);
90674 set_fs(oldfs);
90675 return err;
90676 }
90677 @@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
90678 oldfs = get_fs();
90679 set_fs(KERNEL_DS);
90680 err = sys_clock_gettime(which_clock,
90681 - (struct timespec __user *) &ts);
90682 + (struct timespec __force_user *) &ts);
90683 set_fs(oldfs);
90684 if (!err && put_compat_timespec(&ts, tp))
90685 return -EFAULT;
90686 @@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
90687 oldfs = get_fs();
90688 set_fs(KERNEL_DS);
90689 err = sys_clock_getres(which_clock,
90690 - (struct timespec __user *) &ts);
90691 + (struct timespec __force_user *) &ts);
90692 set_fs(oldfs);
90693 if (!err && tp && put_compat_timespec(&ts, tp))
90694 return -EFAULT;
90695 @@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
90696 long err;
90697 mm_segment_t oldfs;
90698 struct timespec tu;
90699 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
90700 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
90701
90702 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
90703 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
90704 oldfs = get_fs();
90705 set_fs(KERNEL_DS);
90706 err = clock_nanosleep_restart(restart);
90707 @@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
90708 oldfs = get_fs();
90709 set_fs(KERNEL_DS);
90710 err = sys_clock_nanosleep(which_clock, flags,
90711 - (struct timespec __user *) &in,
90712 - (struct timespec __user *) &out);
90713 + (struct timespec __force_user *) &in,
90714 + (struct timespec __force_user *) &out);
90715 set_fs(oldfs);
90716
90717 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
90718 diff --git a/kernel/configs.c b/kernel/configs.c
90719 index abaee68..047facd 100644
90720 --- a/kernel/configs.c
90721 +++ b/kernel/configs.c
90722 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
90723 struct proc_dir_entry *entry;
90724
90725 /* create the current config file */
90726 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
90727 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
90728 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
90729 + &ikconfig_file_ops);
90730 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
90731 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
90732 + &ikconfig_file_ops);
90733 +#endif
90734 +#else
90735 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
90736 &ikconfig_file_ops);
90737 +#endif
90738 +
90739 if (!entry)
90740 return -ENOMEM;
90741
90742 diff --git a/kernel/cpu.c b/kernel/cpu.c
90743 index 3f2f04f..4e53ded 100644
90744 --- a/kernel/cpu.c
90745 +++ b/kernel/cpu.c
90746 @@ -20,7 +20,7 @@
90747 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
90748 static DEFINE_MUTEX(cpu_add_remove_lock);
90749
90750 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
90751 +static RAW_NOTIFIER_HEAD(cpu_chain);
90752
90753 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
90754 * Should always be manipulated under cpu_add_remove_lock
90755 diff --git a/kernel/cred.c b/kernel/cred.c
90756 index 0b5b5fc..f7fe51a 100644
90757 --- a/kernel/cred.c
90758 +++ b/kernel/cred.c
90759 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
90760 */
90761 void __put_cred(struct cred *cred)
90762 {
90763 + pax_track_stack();
90764 +
90765 kdebug("__put_cred(%p{%d,%d})", cred,
90766 atomic_read(&cred->usage),
90767 read_cred_subscribers(cred));
90768 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
90769 {
90770 struct cred *cred;
90771
90772 + pax_track_stack();
90773 +
90774 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
90775 atomic_read(&tsk->cred->usage),
90776 read_cred_subscribers(tsk->cred));
90777 @@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk)
90778 validate_creds(cred);
90779 put_cred(cred);
90780 }
90781 +
90782 +#ifdef CONFIG_GRKERNSEC_SETXID
90783 + cred = (struct cred *) tsk->delayed_cred;
90784 + if (cred) {
90785 + tsk->delayed_cred = NULL;
90786 + validate_creds(cred);
90787 + put_cred(cred);
90788 + }
90789 +#endif
90790 }
90791
90792 /**
90793 @@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task)
90794 {
90795 const struct cred *cred;
90796
90797 + pax_track_stack();
90798 +
90799 rcu_read_lock();
90800
90801 do {
90802 @@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void)
90803 {
90804 struct cred *new;
90805
90806 + pax_track_stack();
90807 +
90808 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
90809 if (!new)
90810 return NULL;
90811 @@ -289,6 +306,8 @@ struct cred *prepare_creds(void)
90812 const struct cred *old;
90813 struct cred *new;
90814
90815 + pax_track_stack();
90816 +
90817 validate_process_creds();
90818
90819 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
90820 @@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void)
90821 struct thread_group_cred *tgcred = NULL;
90822 struct cred *new;
90823
90824 + pax_track_stack();
90825 +
90826 #ifdef CONFIG_KEYS
90827 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
90828 if (!tgcred)
90829 @@ -441,6 +462,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
90830 struct cred *new;
90831 int ret;
90832
90833 + pax_track_stack();
90834 +
90835 mutex_init(&p->cred_guard_mutex);
90836
90837 if (
90838 @@ -523,11 +546,13 @@ error_put:
90839 * Always returns 0 thus allowing this function to be tail-called at the end
90840 * of, say, sys_setgid().
90841 */
90842 -int commit_creds(struct cred *new)
90843 +static int __commit_creds(struct cred *new)
90844 {
90845 struct task_struct *task = current;
90846 const struct cred *old = task->real_cred;
90847
90848 + pax_track_stack();
90849 +
90850 kdebug("commit_creds(%p{%d,%d})", new,
90851 atomic_read(&new->usage),
90852 read_cred_subscribers(new));
90853 @@ -544,6 +569,8 @@ int commit_creds(struct cred *new)
90854
90855 get_cred(new); /* we will require a ref for the subj creds too */
90856
90857 + gr_set_role_label(task, new->uid, new->gid);
90858 +
90859 /* dumpability changes */
90860 if (old->euid != new->euid ||
90861 old->egid != new->egid ||
90862 @@ -563,10 +590,8 @@ int commit_creds(struct cred *new)
90863 key_fsgid_changed(task);
90864
90865 /* do it
90866 - * - What if a process setreuid()'s and this brings the
90867 - * new uid over his NPROC rlimit? We can check this now
90868 - * cheaply with the new uid cache, so if it matters
90869 - * we should be checking for it. -DaveM
90870 + * RLIMIT_NPROC limits on user->processes have already been checked
90871 + * in set_user().
90872 */
90873 alter_cred_subscribers(new, 2);
90874 if (new->user != old->user)
90875 @@ -595,8 +620,96 @@ int commit_creds(struct cred *new)
90876 put_cred(old);
90877 return 0;
90878 }
90879 +
90880 +#ifdef CONFIG_GRKERNSEC_SETXID
90881 +extern int set_user(struct cred *new);
90882 +
90883 +void gr_delayed_cred_worker(void)
90884 +{
90885 + const struct cred *new = current->delayed_cred;
90886 + struct cred *ncred;
90887 +
90888 + current->delayed_cred = NULL;
90889 +
90890 + if (current_uid() && new != NULL) {
90891 + // from doing get_cred on it when queueing this
90892 + put_cred(new);
90893 + return;
90894 + } else if (new == NULL)
90895 + return;
90896 +
90897 + ncred = prepare_creds();
90898 + if (!ncred)
90899 + goto die;
90900 + // uids
90901 + ncred->uid = new->uid;
90902 + ncred->euid = new->euid;
90903 + ncred->suid = new->suid;
90904 + ncred->fsuid = new->fsuid;
90905 + // gids
90906 + ncred->gid = new->gid;
90907 + ncred->egid = new->egid;
90908 + ncred->sgid = new->sgid;
90909 + ncred->fsgid = new->fsgid;
90910 + // groups
90911 + if (set_groups(ncred, new->group_info) < 0) {
90912 + abort_creds(ncred);
90913 + goto die;
90914 + }
90915 + // caps
90916 + ncred->securebits = new->securebits;
90917 + ncred->cap_inheritable = new->cap_inheritable;
90918 + ncred->cap_permitted = new->cap_permitted;
90919 + ncred->cap_effective = new->cap_effective;
90920 + ncred->cap_bset = new->cap_bset;
90921 +
90922 + if (set_user(ncred)) {
90923 + abort_creds(ncred);
90924 + goto die;
90925 + }
90926 +
90927 + // from doing get_cred on it when queueing this
90928 + put_cred(new);
90929 +
90930 + __commit_creds(ncred);
90931 + return;
90932 +die:
90933 + // from doing get_cred on it when queueing this
90934 + put_cred(new);
90935 + do_group_exit(SIGKILL);
90936 +}
90937 +#endif
90938 +
90939 +int commit_creds(struct cred *new)
90940 +{
90941 +#ifdef CONFIG_GRKERNSEC_SETXID
90942 + struct task_struct *t;
90943 +
90944 + /* we won't get called with tasklist_lock held for writing
90945 + and interrupts disabled as the cred struct in that case is
90946 + init_cred
90947 + */
90948 + if (grsec_enable_setxid && !current_is_single_threaded() &&
90949 + !current_uid() && new->uid) {
90950 + rcu_read_lock();
90951 + read_lock(&tasklist_lock);
90952 + for (t = next_thread(current); t != current;
90953 + t = next_thread(t)) {
90954 + if (t->delayed_cred == NULL) {
90955 + t->delayed_cred = get_cred(new);
90956 + set_tsk_need_resched(t);
90957 + }
90958 + }
90959 + read_unlock(&tasklist_lock);
90960 + rcu_read_unlock();
90961 + }
90962 +#endif
90963 + return __commit_creds(new);
90964 +}
90965 +
90966 EXPORT_SYMBOL(commit_creds);
90967
90968 +
90969 /**
90970 * abort_creds - Discard a set of credentials and unlock the current task
90971 * @new: The credentials that were going to be applied
90972 @@ -606,6 +719,8 @@ EXPORT_SYMBOL(commit_creds);
90973 */
90974 void abort_creds(struct cred *new)
90975 {
90976 + pax_track_stack();
90977 +
90978 kdebug("abort_creds(%p{%d,%d})", new,
90979 atomic_read(&new->usage),
90980 read_cred_subscribers(new));
90981 @@ -629,6 +744,8 @@ const struct cred *override_creds(const struct cred *new)
90982 {
90983 const struct cred *old = current->cred;
90984
90985 + pax_track_stack();
90986 +
90987 kdebug("override_creds(%p{%d,%d})", new,
90988 atomic_read(&new->usage),
90989 read_cred_subscribers(new));
90990 @@ -658,6 +775,8 @@ void revert_creds(const struct cred *old)
90991 {
90992 const struct cred *override = current->cred;
90993
90994 + pax_track_stack();
90995 +
90996 kdebug("revert_creds(%p{%d,%d})", old,
90997 atomic_read(&old->usage),
90998 read_cred_subscribers(old));
90999 @@ -704,6 +823,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
91000 const struct cred *old;
91001 struct cred *new;
91002
91003 + pax_track_stack();
91004 +
91005 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
91006 if (!new)
91007 return NULL;
91008 @@ -758,6 +879,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
91009 */
91010 int set_security_override(struct cred *new, u32 secid)
91011 {
91012 + pax_track_stack();
91013 +
91014 return security_kernel_act_as(new, secid);
91015 }
91016 EXPORT_SYMBOL(set_security_override);
91017 @@ -777,6 +900,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
91018 u32 secid;
91019 int ret;
91020
91021 + pax_track_stack();
91022 +
91023 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
91024 if (ret < 0)
91025 return ret;
91026 diff --git a/kernel/exit.c b/kernel/exit.c
91027 index 0f8fae3..66af9b1 100644
91028 --- a/kernel/exit.c
91029 +++ b/kernel/exit.c
91030 @@ -55,6 +55,10 @@
91031 #include <asm/pgtable.h>
91032 #include <asm/mmu_context.h>
91033
91034 +#ifdef CONFIG_GRKERNSEC
91035 +extern rwlock_t grsec_exec_file_lock;
91036 +#endif
91037 +
91038 static void exit_mm(struct task_struct * tsk);
91039
91040 static void __unhash_process(struct task_struct *p)
91041 @@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
91042 struct task_struct *leader;
91043 int zap_leader;
91044 repeat:
91045 +#ifdef CONFIG_NET
91046 + gr_del_task_from_ip_table(p);
91047 +#endif
91048 +
91049 tracehook_prepare_release_task(p);
91050 /* don't need to get the RCU readlock here - the process is dead and
91051 * can't be modifying its own credentials */
91052 @@ -397,7 +405,7 @@ int allow_signal(int sig)
91053 * know it'll be handled, so that they don't get converted to
91054 * SIGKILL or just silently dropped.
91055 */
91056 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
91057 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
91058 recalc_sigpending();
91059 spin_unlock_irq(&current->sighand->siglock);
91060 return 0;
91061 @@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
91062 vsnprintf(current->comm, sizeof(current->comm), name, args);
91063 va_end(args);
91064
91065 +#ifdef CONFIG_GRKERNSEC
91066 + write_lock(&grsec_exec_file_lock);
91067 + if (current->exec_file) {
91068 + fput(current->exec_file);
91069 + current->exec_file = NULL;
91070 + }
91071 + write_unlock(&grsec_exec_file_lock);
91072 +#endif
91073 +
91074 + gr_set_kernel_label(current);
91075 +
91076 /*
91077 * If we were started as result of loading a module, close all of the
91078 * user space pages. We don't need them, and if we didn't close them
91079 @@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
91080 struct task_struct *tsk = current;
91081 int group_dead;
91082
91083 - profile_task_exit(tsk);
91084 -
91085 - WARN_ON(atomic_read(&tsk->fs_excl));
91086 -
91087 + /*
91088 + * Check this first since set_fs() below depends on
91089 + * current_thread_info(), which we better not access when we're in
91090 + * interrupt context. Other than that, we want to do the set_fs()
91091 + * as early as possible.
91092 + */
91093 if (unlikely(in_interrupt()))
91094 panic("Aiee, killing interrupt handler!");
91095 - if (unlikely(!tsk->pid))
91096 - panic("Attempted to kill the idle task!");
91097
91098 /*
91099 - * If do_exit is called because this processes oopsed, it's possible
91100 + * If do_exit is called because this processes Oops'ed, it's possible
91101 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
91102 * continuing. Amongst other possible reasons, this is to prevent
91103 * mm_release()->clear_child_tid() from writing to a user-controlled
91104 @@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
91105 */
91106 set_fs(USER_DS);
91107
91108 + profile_task_exit(tsk);
91109 +
91110 + WARN_ON(atomic_read(&tsk->fs_excl));
91111 +
91112 + if (unlikely(!tsk->pid))
91113 + panic("Attempted to kill the idle task!");
91114 +
91115 tracehook_report_exit(&code);
91116
91117 validate_creds_for_do_exit(tsk);
91118 @@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
91119 tsk->exit_code = code;
91120 taskstats_exit(tsk, group_dead);
91121
91122 + gr_acl_handle_psacct(tsk, code);
91123 + gr_acl_handle_exit();
91124 +
91125 exit_mm(tsk);
91126
91127 if (group_dead)
91128 @@ -1020,7 +1049,7 @@ NORET_TYPE void do_exit(long code)
91129 tsk->flags |= PF_EXITPIDONE;
91130
91131 if (tsk->io_context)
91132 - exit_io_context();
91133 + exit_io_context(tsk);
91134
91135 if (tsk->splice_pipe)
91136 __free_pipe_info(tsk->splice_pipe);
91137 @@ -1059,7 +1088,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
91138 * Take down every thread in the group. This is called by fatal signals
91139 * as well as by sys_exit_group (below).
91140 */
91141 -NORET_TYPE void
91142 +__noreturn void
91143 do_group_exit(int exit_code)
91144 {
91145 struct signal_struct *sig = current->signal;
91146 @@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
91147
91148 if (unlikely(wo->wo_flags & WNOWAIT)) {
91149 int exit_code = p->exit_code;
91150 - int why, status;
91151 + int why;
91152
91153 get_task_struct(p);
91154 read_unlock(&tasklist_lock);
91155 diff --git a/kernel/fork.c b/kernel/fork.c
91156 index 4bde56f..8976a8f 100644
91157 --- a/kernel/fork.c
91158 +++ b/kernel/fork.c
91159 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
91160 *stackend = STACK_END_MAGIC; /* for overflow detection */
91161
91162 #ifdef CONFIG_CC_STACKPROTECTOR
91163 - tsk->stack_canary = get_random_int();
91164 + tsk->stack_canary = pax_get_random_long();
91165 #endif
91166
91167 /* One for us, one for whoever does the "release_task()" (usually parent) */
91168 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91169 mm->locked_vm = 0;
91170 mm->mmap = NULL;
91171 mm->mmap_cache = NULL;
91172 - mm->free_area_cache = oldmm->mmap_base;
91173 - mm->cached_hole_size = ~0UL;
91174 + mm->free_area_cache = oldmm->free_area_cache;
91175 + mm->cached_hole_size = oldmm->cached_hole_size;
91176 mm->map_count = 0;
91177 cpumask_clear(mm_cpumask(mm));
91178 mm->mm_rb = RB_ROOT;
91179 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91180 tmp->vm_flags &= ~VM_LOCKED;
91181 tmp->vm_mm = mm;
91182 tmp->vm_next = tmp->vm_prev = NULL;
91183 + tmp->vm_mirror = NULL;
91184 anon_vma_link(tmp);
91185 file = tmp->vm_file;
91186 if (file) {
91187 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91188 if (retval)
91189 goto out;
91190 }
91191 +
91192 +#ifdef CONFIG_PAX_SEGMEXEC
91193 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
91194 + struct vm_area_struct *mpnt_m;
91195 +
91196 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
91197 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
91198 +
91199 + if (!mpnt->vm_mirror)
91200 + continue;
91201 +
91202 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
91203 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
91204 + mpnt->vm_mirror = mpnt_m;
91205 + } else {
91206 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
91207 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
91208 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
91209 + mpnt->vm_mirror->vm_mirror = mpnt;
91210 + }
91211 + }
91212 + BUG_ON(mpnt_m);
91213 + }
91214 +#endif
91215 +
91216 /* a new mm has just been created */
91217 arch_dup_mmap(oldmm, mm);
91218 retval = 0;
91219 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
91220 write_unlock(&fs->lock);
91221 return -EAGAIN;
91222 }
91223 - fs->users++;
91224 + atomic_inc(&fs->users);
91225 write_unlock(&fs->lock);
91226 return 0;
91227 }
91228 tsk->fs = copy_fs_struct(fs);
91229 if (!tsk->fs)
91230 return -ENOMEM;
91231 + gr_set_chroot_entries(tsk, &tsk->fs->root);
91232 return 0;
91233 }
91234
91235 @@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91236 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
91237 #endif
91238 retval = -EAGAIN;
91239 +
91240 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
91241 +
91242 if (atomic_read(&p->real_cred->user->processes) >=
91243 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
91244 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
91245 - p->real_cred->user != INIT_USER)
91246 + if (p->real_cred->user != INIT_USER &&
91247 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
91248 goto bad_fork_free;
91249 }
91250 + current->flags &= ~PF_NPROC_EXCEEDED;
91251
91252 retval = copy_creds(p, clone_flags);
91253 if (retval < 0)
91254 @@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91255 goto bad_fork_free_pid;
91256 }
91257
91258 + gr_copy_label(p);
91259 +
91260 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
91261 /*
91262 * Clear TID on mm_release()?
91263 @@ -1299,7 +1332,8 @@ bad_fork_free_pid:
91264 if (pid != &init_struct_pid)
91265 free_pid(pid);
91266 bad_fork_cleanup_io:
91267 - put_io_context(p->io_context);
91268 + if (p->io_context)
91269 + exit_io_context(p);
91270 bad_fork_cleanup_namespaces:
91271 exit_task_namespaces(p);
91272 bad_fork_cleanup_mm:
91273 @@ -1333,6 +1367,8 @@ bad_fork_cleanup_count:
91274 bad_fork_free:
91275 free_task(p);
91276 fork_out:
91277 + gr_log_forkfail(retval);
91278 +
91279 return ERR_PTR(retval);
91280 }
91281
91282 @@ -1426,6 +1462,8 @@ long do_fork(unsigned long clone_flags,
91283 if (clone_flags & CLONE_PARENT_SETTID)
91284 put_user(nr, parent_tidptr);
91285
91286 + gr_handle_brute_check();
91287 +
91288 if (clone_flags & CLONE_VFORK) {
91289 p->vfork_done = &vfork;
91290 init_completion(&vfork);
91291 @@ -1558,7 +1596,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
91292 return 0;
91293
91294 /* don't need lock here; in the worst case we'll do useless copy */
91295 - if (fs->users == 1)
91296 + if (atomic_read(&fs->users) == 1)
91297 return 0;
91298
91299 *new_fsp = copy_fs_struct(fs);
91300 @@ -1681,7 +1719,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
91301 fs = current->fs;
91302 write_lock(&fs->lock);
91303 current->fs = new_fs;
91304 - if (--fs->users)
91305 + gr_set_chroot_entries(current, &current->fs->root);
91306 + if (atomic_dec_return(&fs->users))
91307 new_fs = NULL;
91308 else
91309 new_fs = fs;
91310 diff --git a/kernel/futex.c b/kernel/futex.c
91311 index fb98c9f..333faec 100644
91312 --- a/kernel/futex.c
91313 +++ b/kernel/futex.c
91314 @@ -54,6 +54,7 @@
91315 #include <linux/mount.h>
91316 #include <linux/pagemap.h>
91317 #include <linux/syscalls.h>
91318 +#include <linux/ptrace.h>
91319 #include <linux/signal.h>
91320 #include <linux/module.h>
91321 #include <linux/magic.h>
91322 @@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
91323 struct page *page;
91324 int err, ro = 0;
91325
91326 +#ifdef CONFIG_PAX_SEGMEXEC
91327 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
91328 + return -EFAULT;
91329 +#endif
91330 +
91331 /*
91332 * The futex address must be "naturally" aligned.
91333 */
91334 @@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
91335 struct futex_q q;
91336 int ret;
91337
91338 + pax_track_stack();
91339 +
91340 if (!bitset)
91341 return -EINVAL;
91342
91343 @@ -1871,7 +1879,7 @@ retry:
91344
91345 restart = &current_thread_info()->restart_block;
91346 restart->fn = futex_wait_restart;
91347 - restart->futex.uaddr = (u32 *)uaddr;
91348 + restart->futex.uaddr = uaddr;
91349 restart->futex.val = val;
91350 restart->futex.time = abs_time->tv64;
91351 restart->futex.bitset = bitset;
91352 @@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
91353 struct futex_q q;
91354 int res, ret;
91355
91356 + pax_track_stack();
91357 +
91358 if (!bitset)
91359 return -EINVAL;
91360
91361 @@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
91362 if (!p)
91363 goto err_unlock;
91364 ret = -EPERM;
91365 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
91366 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
91367 + goto err_unlock;
91368 +#endif
91369 pcred = __task_cred(p);
91370 if (cred->euid != pcred->euid &&
91371 cred->euid != pcred->uid &&
91372 @@ -2489,7 +2503,7 @@ retry:
91373 */
91374 static inline int fetch_robust_entry(struct robust_list __user **entry,
91375 struct robust_list __user * __user *head,
91376 - int *pi)
91377 + unsigned int *pi)
91378 {
91379 unsigned long uentry;
91380
91381 @@ -2670,6 +2684,7 @@ static int __init futex_init(void)
91382 {
91383 u32 curval;
91384 int i;
91385 + mm_segment_t oldfs;
91386
91387 /*
91388 * This will fail and we want it. Some arch implementations do
91389 @@ -2681,7 +2696,10 @@ static int __init futex_init(void)
91390 * implementation, the non functional ones will return
91391 * -ENOSYS.
91392 */
91393 + oldfs = get_fs();
91394 + set_fs(USER_DS);
91395 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
91396 + set_fs(oldfs);
91397 if (curval == -EFAULT)
91398 futex_cmpxchg_enabled = 1;
91399
91400 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
91401 index 2357165..eb25501 100644
91402 --- a/kernel/futex_compat.c
91403 +++ b/kernel/futex_compat.c
91404 @@ -10,6 +10,7 @@
91405 #include <linux/compat.h>
91406 #include <linux/nsproxy.h>
91407 #include <linux/futex.h>
91408 +#include <linux/ptrace.h>
91409
91410 #include <asm/uaccess.h>
91411
91412 @@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
91413 {
91414 struct compat_robust_list_head __user *head;
91415 unsigned long ret;
91416 - const struct cred *cred = current_cred(), *pcred;
91417 + const struct cred *cred = current_cred();
91418 + const struct cred *pcred;
91419
91420 if (!futex_cmpxchg_enabled)
91421 return -ENOSYS;
91422 @@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
91423 if (!p)
91424 goto err_unlock;
91425 ret = -EPERM;
91426 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
91427 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
91428 + goto err_unlock;
91429 +#endif
91430 pcred = __task_cred(p);
91431 if (cred->euid != pcred->euid &&
91432 cred->euid != pcred->uid &&
91433 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
91434 index 9b22d03..6295b62 100644
91435 --- a/kernel/gcov/base.c
91436 +++ b/kernel/gcov/base.c
91437 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
91438 }
91439
91440 #ifdef CONFIG_MODULES
91441 -static inline int within(void *addr, void *start, unsigned long size)
91442 -{
91443 - return ((addr >= start) && (addr < start + size));
91444 -}
91445 -
91446 /* Update list and generate events when modules are unloaded. */
91447 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91448 void *data)
91449 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91450 prev = NULL;
91451 /* Remove entries located in module from linked list. */
91452 for (info = gcov_info_head; info; info = info->next) {
91453 - if (within(info, mod->module_core, mod->core_size)) {
91454 + if (within_module_core_rw((unsigned long)info, mod)) {
91455 if (prev)
91456 prev->next = info->next;
91457 else
91458 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
91459 index a6e9d00..a0da4f9 100644
91460 --- a/kernel/hrtimer.c
91461 +++ b/kernel/hrtimer.c
91462 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
91463 local_irq_restore(flags);
91464 }
91465
91466 -static void run_hrtimer_softirq(struct softirq_action *h)
91467 +static void run_hrtimer_softirq(void)
91468 {
91469 hrtimer_peek_ahead_timers();
91470 }
91471 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
91472 index 8b6b8b6..6bc87df 100644
91473 --- a/kernel/kallsyms.c
91474 +++ b/kernel/kallsyms.c
91475 @@ -11,6 +11,9 @@
91476 * Changed the compression method from stem compression to "table lookup"
91477 * compression (see scripts/kallsyms.c for a more complete description)
91478 */
91479 +#ifdef CONFIG_GRKERNSEC_HIDESYM
91480 +#define __INCLUDED_BY_HIDESYM 1
91481 +#endif
91482 #include <linux/kallsyms.h>
91483 #include <linux/module.h>
91484 #include <linux/init.h>
91485 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
91486
91487 static inline int is_kernel_inittext(unsigned long addr)
91488 {
91489 + if (system_state != SYSTEM_BOOTING)
91490 + return 0;
91491 +
91492 if (addr >= (unsigned long)_sinittext
91493 && addr <= (unsigned long)_einittext)
91494 return 1;
91495 return 0;
91496 }
91497
91498 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91499 +#ifdef CONFIG_MODULES
91500 +static inline int is_module_text(unsigned long addr)
91501 +{
91502 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
91503 + return 1;
91504 +
91505 + addr = ktla_ktva(addr);
91506 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
91507 +}
91508 +#else
91509 +static inline int is_module_text(unsigned long addr)
91510 +{
91511 + return 0;
91512 +}
91513 +#endif
91514 +#endif
91515 +
91516 static inline int is_kernel_text(unsigned long addr)
91517 {
91518 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
91519 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
91520
91521 static inline int is_kernel(unsigned long addr)
91522 {
91523 +
91524 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91525 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
91526 + return 1;
91527 +
91528 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
91529 +#else
91530 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
91531 +#endif
91532 +
91533 return 1;
91534 return in_gate_area_no_task(addr);
91535 }
91536
91537 static int is_ksym_addr(unsigned long addr)
91538 {
91539 +
91540 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91541 + if (is_module_text(addr))
91542 + return 0;
91543 +#endif
91544 +
91545 if (all_var)
91546 return is_kernel(addr);
91547
91548 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
91549
91550 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
91551 {
91552 - iter->name[0] = '\0';
91553 iter->nameoff = get_symbol_offset(new_pos);
91554 iter->pos = new_pos;
91555 }
91556 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
91557 {
91558 struct kallsym_iter *iter = m->private;
91559
91560 +#ifdef CONFIG_GRKERNSEC_HIDESYM
91561 + if (current_uid())
91562 + return 0;
91563 +#endif
91564 +
91565 /* Some debugging symbols have no name. Ignore them. */
91566 if (!iter->name[0])
91567 return 0;
91568 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
91569 struct kallsym_iter *iter;
91570 int ret;
91571
91572 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
91573 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
91574 if (!iter)
91575 return -ENOMEM;
91576 reset_iter(iter, 0);
91577 diff --git a/kernel/kexec.c b/kernel/kexec.c
91578 index f336e21..9c1c20b 100644
91579 --- a/kernel/kexec.c
91580 +++ b/kernel/kexec.c
91581 @@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
91582 unsigned long flags)
91583 {
91584 struct compat_kexec_segment in;
91585 - struct kexec_segment out, __user *ksegments;
91586 + struct kexec_segment out;
91587 + struct kexec_segment __user *ksegments;
91588 unsigned long i, result;
91589
91590 /* Don't allow clients that don't understand the native
91591 diff --git a/kernel/kgdb.c b/kernel/kgdb.c
91592 index 53dae4b..9ba3743 100644
91593 --- a/kernel/kgdb.c
91594 +++ b/kernel/kgdb.c
91595 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
91596 /* Guard for recursive entry */
91597 static int exception_level;
91598
91599 -static struct kgdb_io *kgdb_io_ops;
91600 +static const struct kgdb_io *kgdb_io_ops;
91601 static DEFINE_SPINLOCK(kgdb_registration_lock);
91602
91603 /* kgdb console driver is loaded */
91604 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
91605 */
91606 static atomic_t passive_cpu_wait[NR_CPUS];
91607 static atomic_t cpu_in_kgdb[NR_CPUS];
91608 -atomic_t kgdb_setting_breakpoint;
91609 +atomic_unchecked_t kgdb_setting_breakpoint;
91610
91611 struct task_struct *kgdb_usethread;
91612 struct task_struct *kgdb_contthread;
91613 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
91614 sizeof(unsigned long)];
91615
91616 /* to keep track of the CPU which is doing the single stepping*/
91617 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
91618 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
91619
91620 /*
91621 * If you are debugging a problem where roundup (the collection of
91622 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
91623 return 0;
91624 if (kgdb_connected)
91625 return 1;
91626 - if (atomic_read(&kgdb_setting_breakpoint))
91627 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
91628 return 1;
91629 if (print_wait)
91630 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
91631 @@ -1426,8 +1426,8 @@ acquirelock:
91632 * instance of the exception handler wanted to come into the
91633 * debugger on a different CPU via a single step
91634 */
91635 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
91636 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
91637 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
91638 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
91639
91640 atomic_set(&kgdb_active, -1);
91641 touch_softlockup_watchdog();
91642 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
91643 *
91644 * Register it with the KGDB core.
91645 */
91646 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
91647 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
91648 {
91649 int err;
91650
91651 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
91652 *
91653 * Unregister it with the KGDB core.
91654 */
91655 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
91656 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
91657 {
91658 BUG_ON(kgdb_connected);
91659
91660 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
91661 */
91662 void kgdb_breakpoint(void)
91663 {
91664 - atomic_set(&kgdb_setting_breakpoint, 1);
91665 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
91666 wmb(); /* Sync point before breakpoint */
91667 arch_kgdb_breakpoint();
91668 wmb(); /* Sync point after breakpoint */
91669 - atomic_set(&kgdb_setting_breakpoint, 0);
91670 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
91671 }
91672 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
91673
91674 diff --git a/kernel/kmod.c b/kernel/kmod.c
91675 index a061472..40884b6 100644
91676 --- a/kernel/kmod.c
91677 +++ b/kernel/kmod.c
91678 @@ -68,13 +68,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
91679 * If module auto-loading support is disabled then this function
91680 * becomes a no-operation.
91681 */
91682 -int __request_module(bool wait, const char *fmt, ...)
91683 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
91684 {
91685 - va_list args;
91686 char module_name[MODULE_NAME_LEN];
91687 unsigned int max_modprobes;
91688 int ret;
91689 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
91690 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
91691 static char *envp[] = { "HOME=/",
91692 "TERM=linux",
91693 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
91694 @@ -87,12 +86,24 @@ int __request_module(bool wait, const char *fmt, ...)
91695 if (ret)
91696 return ret;
91697
91698 - va_start(args, fmt);
91699 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
91700 - va_end(args);
91701 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
91702 if (ret >= MODULE_NAME_LEN)
91703 return -ENAMETOOLONG;
91704
91705 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
91706 + if (!current_uid()) {
91707 + /* hack to workaround consolekit/udisks stupidity */
91708 + read_lock(&tasklist_lock);
91709 + if (!strcmp(current->comm, "mount") &&
91710 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
91711 + read_unlock(&tasklist_lock);
91712 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
91713 + return -EPERM;
91714 + }
91715 + read_unlock(&tasklist_lock);
91716 + }
91717 +#endif
91718 +
91719 /* If modprobe needs a service that is in a module, we get a recursive
91720 * loop. Limit the number of running kmod threads to max_threads/2 or
91721 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
91722 @@ -126,6 +137,48 @@ int __request_module(bool wait, const char *fmt, ...)
91723 atomic_dec(&kmod_concurrent);
91724 return ret;
91725 }
91726 +
91727 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
91728 +{
91729 + va_list args;
91730 + int ret;
91731 +
91732 + va_start(args, fmt);
91733 + ret = ____request_module(wait, module_param, fmt, args);
91734 + va_end(args);
91735 +
91736 + return ret;
91737 +}
91738 +
91739 +int __request_module(bool wait, const char *fmt, ...)
91740 +{
91741 + va_list args;
91742 + int ret;
91743 +
91744 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
91745 + if (current_uid()) {
91746 + char module_param[MODULE_NAME_LEN];
91747 +
91748 + memset(module_param, 0, sizeof(module_param));
91749 +
91750 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
91751 +
91752 + va_start(args, fmt);
91753 + ret = ____request_module(wait, module_param, fmt, args);
91754 + va_end(args);
91755 +
91756 + return ret;
91757 + }
91758 +#endif
91759 +
91760 + va_start(args, fmt);
91761 + ret = ____request_module(wait, NULL, fmt, args);
91762 + va_end(args);
91763 +
91764 + return ret;
91765 +}
91766 +
91767 +
91768 EXPORT_SYMBOL(__request_module);
91769 #endif /* CONFIG_MODULES */
91770
91771 @@ -231,7 +284,7 @@ static int wait_for_helper(void *data)
91772 *
91773 * Thus the __user pointer cast is valid here.
91774 */
91775 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
91776 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
91777
91778 /*
91779 * If ret is 0, either ____call_usermodehelper failed and the
91780 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
91781 index 176d825..77fa8ea 100644
91782 --- a/kernel/kprobes.c
91783 +++ b/kernel/kprobes.c
91784 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
91785 * kernel image and loaded module images reside. This is required
91786 * so x86_64 can correctly handle the %rip-relative fixups.
91787 */
91788 - kip->insns = module_alloc(PAGE_SIZE);
91789 + kip->insns = module_alloc_exec(PAGE_SIZE);
91790 if (!kip->insns) {
91791 kfree(kip);
91792 return NULL;
91793 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
91794 */
91795 if (!list_is_singular(&kprobe_insn_pages)) {
91796 list_del(&kip->list);
91797 - module_free(NULL, kip->insns);
91798 + module_free_exec(NULL, kip->insns);
91799 kfree(kip);
91800 }
91801 return 1;
91802 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
91803 {
91804 int i, err = 0;
91805 unsigned long offset = 0, size = 0;
91806 - char *modname, namebuf[128];
91807 + char *modname, namebuf[KSYM_NAME_LEN];
91808 const char *symbol_name;
91809 void *addr;
91810 struct kprobe_blackpoint *kb;
91811 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
91812 const char *sym = NULL;
91813 unsigned int i = *(loff_t *) v;
91814 unsigned long offset = 0;
91815 - char *modname, namebuf[128];
91816 + char *modname, namebuf[KSYM_NAME_LEN];
91817
91818 head = &kprobe_table[i];
91819 preempt_disable();
91820 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
91821 index d86fe89..d12fc66 100644
91822 --- a/kernel/lockdep.c
91823 +++ b/kernel/lockdep.c
91824 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
91825 /*
91826 * Various lockdep statistics:
91827 */
91828 -atomic_t chain_lookup_hits;
91829 -atomic_t chain_lookup_misses;
91830 -atomic_t hardirqs_on_events;
91831 -atomic_t hardirqs_off_events;
91832 -atomic_t redundant_hardirqs_on;
91833 -atomic_t redundant_hardirqs_off;
91834 -atomic_t softirqs_on_events;
91835 -atomic_t softirqs_off_events;
91836 -atomic_t redundant_softirqs_on;
91837 -atomic_t redundant_softirqs_off;
91838 -atomic_t nr_unused_locks;
91839 -atomic_t nr_cyclic_checks;
91840 -atomic_t nr_find_usage_forwards_checks;
91841 -atomic_t nr_find_usage_backwards_checks;
91842 +atomic_unchecked_t chain_lookup_hits;
91843 +atomic_unchecked_t chain_lookup_misses;
91844 +atomic_unchecked_t hardirqs_on_events;
91845 +atomic_unchecked_t hardirqs_off_events;
91846 +atomic_unchecked_t redundant_hardirqs_on;
91847 +atomic_unchecked_t redundant_hardirqs_off;
91848 +atomic_unchecked_t softirqs_on_events;
91849 +atomic_unchecked_t softirqs_off_events;
91850 +atomic_unchecked_t redundant_softirqs_on;
91851 +atomic_unchecked_t redundant_softirqs_off;
91852 +atomic_unchecked_t nr_unused_locks;
91853 +atomic_unchecked_t nr_cyclic_checks;
91854 +atomic_unchecked_t nr_find_usage_forwards_checks;
91855 +atomic_unchecked_t nr_find_usage_backwards_checks;
91856 #endif
91857
91858 /*
91859 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
91860 int i;
91861 #endif
91862
91863 +#ifdef CONFIG_PAX_KERNEXEC
91864 + start = ktla_ktva(start);
91865 +#endif
91866 +
91867 /*
91868 * static variable?
91869 */
91870 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
91871 */
91872 for_each_possible_cpu(i) {
91873 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
91874 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
91875 - + per_cpu_offset(i);
91876 + end = start + PERCPU_ENOUGH_ROOM;
91877
91878 if ((addr >= start) && (addr < end))
91879 return 1;
91880 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
91881 if (!static_obj(lock->key)) {
91882 debug_locks_off();
91883 printk("INFO: trying to register non-static key.\n");
91884 + printk("lock:%pS key:%pS.\n", lock, lock->key);
91885 printk("the code is fine but needs lockdep annotation.\n");
91886 printk("turning off the locking correctness validator.\n");
91887 dump_stack();
91888 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
91889 if (!class)
91890 return 0;
91891 }
91892 - debug_atomic_inc((atomic_t *)&class->ops);
91893 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
91894 if (very_verbose(class)) {
91895 printk("\nacquire class [%p] %s", class->key, class->name);
91896 if (class->name_version > 1)
91897 diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
91898 index a2ee95a..092f0f2 100644
91899 --- a/kernel/lockdep_internals.h
91900 +++ b/kernel/lockdep_internals.h
91901 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
91902 /*
91903 * Various lockdep statistics:
91904 */
91905 -extern atomic_t chain_lookup_hits;
91906 -extern atomic_t chain_lookup_misses;
91907 -extern atomic_t hardirqs_on_events;
91908 -extern atomic_t hardirqs_off_events;
91909 -extern atomic_t redundant_hardirqs_on;
91910 -extern atomic_t redundant_hardirqs_off;
91911 -extern atomic_t softirqs_on_events;
91912 -extern atomic_t softirqs_off_events;
91913 -extern atomic_t redundant_softirqs_on;
91914 -extern atomic_t redundant_softirqs_off;
91915 -extern atomic_t nr_unused_locks;
91916 -extern atomic_t nr_cyclic_checks;
91917 -extern atomic_t nr_cyclic_check_recursions;
91918 -extern atomic_t nr_find_usage_forwards_checks;
91919 -extern atomic_t nr_find_usage_forwards_recursions;
91920 -extern atomic_t nr_find_usage_backwards_checks;
91921 -extern atomic_t nr_find_usage_backwards_recursions;
91922 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
91923 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
91924 -# define debug_atomic_read(ptr) atomic_read(ptr)
91925 +extern atomic_unchecked_t chain_lookup_hits;
91926 +extern atomic_unchecked_t chain_lookup_misses;
91927 +extern atomic_unchecked_t hardirqs_on_events;
91928 +extern atomic_unchecked_t hardirqs_off_events;
91929 +extern atomic_unchecked_t redundant_hardirqs_on;
91930 +extern atomic_unchecked_t redundant_hardirqs_off;
91931 +extern atomic_unchecked_t softirqs_on_events;
91932 +extern atomic_unchecked_t softirqs_off_events;
91933 +extern atomic_unchecked_t redundant_softirqs_on;
91934 +extern atomic_unchecked_t redundant_softirqs_off;
91935 +extern atomic_unchecked_t nr_unused_locks;
91936 +extern atomic_unchecked_t nr_cyclic_checks;
91937 +extern atomic_unchecked_t nr_cyclic_check_recursions;
91938 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
91939 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
91940 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
91941 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
91942 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
91943 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
91944 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
91945 #else
91946 # define debug_atomic_inc(ptr) do { } while (0)
91947 # define debug_atomic_dec(ptr) do { } while (0)
91948 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
91949 index d4aba4f..02a353f 100644
91950 --- a/kernel/lockdep_proc.c
91951 +++ b/kernel/lockdep_proc.c
91952 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
91953
91954 static void print_name(struct seq_file *m, struct lock_class *class)
91955 {
91956 - char str[128];
91957 + char str[KSYM_NAME_LEN];
91958 const char *name = class->name;
91959
91960 if (!name) {
91961 diff --git a/kernel/module.c b/kernel/module.c
91962 index 4b270e6..2efdb65 100644
91963 --- a/kernel/module.c
91964 +++ b/kernel/module.c
91965 @@ -55,6 +55,7 @@
91966 #include <linux/async.h>
91967 #include <linux/percpu.h>
91968 #include <linux/kmemleak.h>
91969 +#include <linux/grsecurity.h>
91970
91971 #define CREATE_TRACE_POINTS
91972 #include <trace/events/module.h>
91973 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
91974 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
91975
91976 /* Bounds of module allocation, for speeding __module_address */
91977 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
91978 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
91979 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
91980
91981 int register_module_notifier(struct notifier_block * nb)
91982 {
91983 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
91984 return true;
91985
91986 list_for_each_entry_rcu(mod, &modules, list) {
91987 - struct symsearch arr[] = {
91988 + struct symsearch modarr[] = {
91989 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
91990 NOT_GPL_ONLY, false },
91991 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
91992 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
91993 #endif
91994 };
91995
91996 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
91997 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
91998 return true;
91999 }
92000 return false;
92001 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
92002 void *ptr;
92003 int cpu;
92004
92005 - if (align > PAGE_SIZE) {
92006 + if (align-1 >= PAGE_SIZE) {
92007 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
92008 name, align, PAGE_SIZE);
92009 align = PAGE_SIZE;
92010 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
92011 * /sys/module/foo/sections stuff
92012 * J. Corbet <corbet@lwn.net>
92013 */
92014 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
92015 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
92016
92017 static inline bool sect_empty(const Elf_Shdr *sect)
92018 {
92019 @@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
92020 destroy_params(mod->kp, mod->num_kp);
92021
92022 /* This may be NULL, but that's OK */
92023 - module_free(mod, mod->module_init);
92024 + module_free(mod, mod->module_init_rw);
92025 + module_free_exec(mod, mod->module_init_rx);
92026 kfree(mod->args);
92027 if (mod->percpu)
92028 percpu_modfree(mod->percpu);
92029 @@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
92030 percpu_modfree(mod->refptr);
92031 #endif
92032 /* Free lock-classes: */
92033 - lockdep_free_key_range(mod->module_core, mod->core_size);
92034 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
92035 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
92036
92037 /* Finally, free the core (containing the module structure) */
92038 - module_free(mod, mod->module_core);
92039 + module_free_exec(mod, mod->module_core_rx);
92040 + module_free(mod, mod->module_core_rw);
92041
92042 #ifdef CONFIG_MPU
92043 update_protections(current->mm);
92044 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
92045 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
92046 int ret = 0;
92047 const struct kernel_symbol *ksym;
92048 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
92049 + int is_fs_load = 0;
92050 + int register_filesystem_found = 0;
92051 + char *p;
92052 +
92053 + p = strstr(mod->args, "grsec_modharden_fs");
92054 +
92055 + if (p) {
92056 + char *endptr = p + strlen("grsec_modharden_fs");
92057 + /* copy \0 as well */
92058 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
92059 + is_fs_load = 1;
92060 + }
92061 +#endif
92062 +
92063
92064 for (i = 1; i < n; i++) {
92065 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
92066 + const char *name = strtab + sym[i].st_name;
92067 +
92068 + /* it's a real shame this will never get ripped and copied
92069 + upstream! ;(
92070 + */
92071 + if (is_fs_load && !strcmp(name, "register_filesystem"))
92072 + register_filesystem_found = 1;
92073 +#endif
92074 switch (sym[i].st_shndx) {
92075 case SHN_COMMON:
92076 /* We compiled with -fno-common. These are not
92077 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
92078 strtab + sym[i].st_name, mod);
92079 /* Ok if resolved. */
92080 if (ksym) {
92081 + pax_open_kernel();
92082 sym[i].st_value = ksym->value;
92083 + pax_close_kernel();
92084 break;
92085 }
92086
92087 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
92088 secbase = (unsigned long)mod->percpu;
92089 else
92090 secbase = sechdrs[sym[i].st_shndx].sh_addr;
92091 + pax_open_kernel();
92092 sym[i].st_value += secbase;
92093 + pax_close_kernel();
92094 break;
92095 }
92096 }
92097
92098 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
92099 + if (is_fs_load && !register_filesystem_found) {
92100 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
92101 + ret = -EPERM;
92102 + }
92103 +#endif
92104 +
92105 return ret;
92106 }
92107
92108 @@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
92109 || s->sh_entsize != ~0UL
92110 || strstarts(secstrings + s->sh_name, ".init"))
92111 continue;
92112 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
92113 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92114 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
92115 + else
92116 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
92117 DEBUGP("\t%s\n", secstrings + s->sh_name);
92118 }
92119 - if (m == 0)
92120 - mod->core_text_size = mod->core_size;
92121 }
92122
92123 DEBUGP("Init section allocation order:\n");
92124 @@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
92125 || s->sh_entsize != ~0UL
92126 || !strstarts(secstrings + s->sh_name, ".init"))
92127 continue;
92128 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
92129 - | INIT_OFFSET_MASK);
92130 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92131 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
92132 + else
92133 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
92134 + s->sh_entsize |= INIT_OFFSET_MASK;
92135 DEBUGP("\t%s\n", secstrings + s->sh_name);
92136 }
92137 - if (m == 0)
92138 - mod->init_text_size = mod->init_size;
92139 }
92140 }
92141
92142 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
92143
92144 /* As per nm */
92145 static char elf_type(const Elf_Sym *sym,
92146 - Elf_Shdr *sechdrs,
92147 - const char *secstrings,
92148 - struct module *mod)
92149 + const Elf_Shdr *sechdrs,
92150 + const char *secstrings)
92151 {
92152 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
92153 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
92154 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
92155
92156 /* Put symbol section at end of init part of module. */
92157 symsect->sh_flags |= SHF_ALLOC;
92158 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
92159 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
92160 symindex) | INIT_OFFSET_MASK;
92161 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
92162
92163 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
92164 }
92165
92166 /* Append room for core symbols at end of core part. */
92167 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
92168 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
92169 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
92170 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
92171
92172 /* Put string table section at end of init part of module. */
92173 strsect->sh_flags |= SHF_ALLOC;
92174 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
92175 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
92176 strindex) | INIT_OFFSET_MASK;
92177 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
92178
92179 /* Append room for core symbols' strings at end of core part. */
92180 - *pstroffs = mod->core_size;
92181 + *pstroffs = mod->core_size_rx;
92182 __set_bit(0, strmap);
92183 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
92184 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
92185
92186 return symoffs;
92187 }
92188 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
92189 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
92190 mod->strtab = (void *)sechdrs[strindex].sh_addr;
92191
92192 + pax_open_kernel();
92193 +
92194 /* Set types up while we still have access to sections. */
92195 for (i = 0; i < mod->num_symtab; i++)
92196 mod->symtab[i].st_info
92197 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
92198 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
92199
92200 - mod->core_symtab = dst = mod->module_core + symoffs;
92201 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
92202 src = mod->symtab;
92203 *dst = *src;
92204 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
92205 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
92206 }
92207 mod->core_num_syms = ndst;
92208
92209 - mod->core_strtab = s = mod->module_core + stroffs;
92210 + mod->core_strtab = s = mod->module_core_rx + stroffs;
92211 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
92212 if (test_bit(i, strmap))
92213 *++s = mod->strtab[i];
92214 +
92215 + pax_close_kernel();
92216 }
92217 #else
92218 static inline unsigned long layout_symtab(struct module *mod,
92219 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
92220 #endif
92221 }
92222
92223 -static void *module_alloc_update_bounds(unsigned long size)
92224 +static void *module_alloc_update_bounds_rw(unsigned long size)
92225 {
92226 void *ret = module_alloc(size);
92227
92228 if (ret) {
92229 /* Update module bounds. */
92230 - if ((unsigned long)ret < module_addr_min)
92231 - module_addr_min = (unsigned long)ret;
92232 - if ((unsigned long)ret + size > module_addr_max)
92233 - module_addr_max = (unsigned long)ret + size;
92234 + if ((unsigned long)ret < module_addr_min_rw)
92235 + module_addr_min_rw = (unsigned long)ret;
92236 + if ((unsigned long)ret + size > module_addr_max_rw)
92237 + module_addr_max_rw = (unsigned long)ret + size;
92238 + }
92239 + return ret;
92240 +}
92241 +
92242 +static void *module_alloc_update_bounds_rx(unsigned long size)
92243 +{
92244 + void *ret = module_alloc_exec(size);
92245 +
92246 + if (ret) {
92247 + /* Update module bounds. */
92248 + if ((unsigned long)ret < module_addr_min_rx)
92249 + module_addr_min_rx = (unsigned long)ret;
92250 + if ((unsigned long)ret + size > module_addr_max_rx)
92251 + module_addr_max_rx = (unsigned long)ret + size;
92252 }
92253 return ret;
92254 }
92255 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
92256 unsigned int i;
92257
92258 /* only scan the sections containing data */
92259 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
92260 - (unsigned long)mod->module_core,
92261 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
92262 + (unsigned long)mod->module_core_rw,
92263 sizeof(struct module), GFP_KERNEL);
92264
92265 for (i = 1; i < hdr->e_shnum; i++) {
92266 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
92267 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
92268 continue;
92269
92270 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
92271 - (unsigned long)mod->module_core,
92272 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
92273 + (unsigned long)mod->module_core_rw,
92274 sechdrs[i].sh_size, GFP_KERNEL);
92275 }
92276 }
92277 @@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
92278 Elf_Ehdr *hdr;
92279 Elf_Shdr *sechdrs;
92280 char *secstrings, *args, *modmagic, *strtab = NULL;
92281 - char *staging;
92282 + char *staging, *license;
92283 unsigned int i;
92284 unsigned int symindex = 0;
92285 unsigned int strindex = 0;
92286 @@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
92287 goto free_hdr;
92288 }
92289
92290 + license = get_modinfo(sechdrs, infoindex, "license");
92291 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
92292 + if (!license || !license_is_gpl_compatible(license)) {
92293 + err = -ENOEXEC;
92294 + goto free_hdr;
92295 + }
92296 +#endif
92297 +
92298 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
92299 /* This is allowed: modprobe --force will invalidate it. */
92300 if (!modmagic) {
92301 @@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
92302 secstrings, &stroffs, strmap);
92303
92304 /* Do the allocs. */
92305 - ptr = module_alloc_update_bounds(mod->core_size);
92306 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
92307 /*
92308 * The pointer to this block is stored in the module structure
92309 * which is inside the block. Just mark it as not being a
92310 @@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
92311 err = -ENOMEM;
92312 goto free_percpu;
92313 }
92314 - memset(ptr, 0, mod->core_size);
92315 - mod->module_core = ptr;
92316 + memset(ptr, 0, mod->core_size_rw);
92317 + mod->module_core_rw = ptr;
92318
92319 - ptr = module_alloc_update_bounds(mod->init_size);
92320 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
92321 /*
92322 * The pointer to this block is stored in the module structure
92323 * which is inside the block. This block doesn't need to be
92324 * scanned as it contains data and code that will be freed
92325 * after the module is initialized.
92326 */
92327 - kmemleak_ignore(ptr);
92328 - if (!ptr && mod->init_size) {
92329 + kmemleak_not_leak(ptr);
92330 + if (!ptr && mod->init_size_rw) {
92331 err = -ENOMEM;
92332 - goto free_core;
92333 + goto free_core_rw;
92334 }
92335 - memset(ptr, 0, mod->init_size);
92336 - mod->module_init = ptr;
92337 + memset(ptr, 0, mod->init_size_rw);
92338 + mod->module_init_rw = ptr;
92339 +
92340 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
92341 + kmemleak_not_leak(ptr);
92342 + if (!ptr) {
92343 + err = -ENOMEM;
92344 + goto free_init_rw;
92345 + }
92346 +
92347 + pax_open_kernel();
92348 + memset(ptr, 0, mod->core_size_rx);
92349 + pax_close_kernel();
92350 + mod->module_core_rx = ptr;
92351 +
92352 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
92353 + kmemleak_not_leak(ptr);
92354 + if (!ptr && mod->init_size_rx) {
92355 + err = -ENOMEM;
92356 + goto free_core_rx;
92357 + }
92358 +
92359 + pax_open_kernel();
92360 + memset(ptr, 0, mod->init_size_rx);
92361 + pax_close_kernel();
92362 + mod->module_init_rx = ptr;
92363
92364 /* Transfer each section which specifies SHF_ALLOC */
92365 DEBUGP("final section addresses:\n");
92366 @@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
92367 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
92368 continue;
92369
92370 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
92371 - dest = mod->module_init
92372 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
92373 - else
92374 - dest = mod->module_core + sechdrs[i].sh_entsize;
92375 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
92376 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
92377 + dest = mod->module_init_rw
92378 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
92379 + else
92380 + dest = mod->module_init_rx
92381 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
92382 + } else {
92383 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
92384 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
92385 + else
92386 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
92387 + }
92388
92389 - if (sechdrs[i].sh_type != SHT_NOBITS)
92390 - memcpy(dest, (void *)sechdrs[i].sh_addr,
92391 - sechdrs[i].sh_size);
92392 + if (sechdrs[i].sh_type != SHT_NOBITS) {
92393 +
92394 +#ifdef CONFIG_PAX_KERNEXEC
92395 +#ifdef CONFIG_X86_64
92396 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
92397 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
92398 +#endif
92399 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
92400 + pax_open_kernel();
92401 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
92402 + pax_close_kernel();
92403 + } else
92404 +#endif
92405 +
92406 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
92407 + }
92408 /* Update sh_addr to point to copy in image. */
92409 - sechdrs[i].sh_addr = (unsigned long)dest;
92410 +
92411 +#ifdef CONFIG_PAX_KERNEXEC
92412 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
92413 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
92414 + else
92415 +#endif
92416 +
92417 + sechdrs[i].sh_addr = (unsigned long)dest;
92418 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
92419 }
92420 /* Module has been moved. */
92421 @@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
92422 mod->name);
92423 if (!mod->refptr) {
92424 err = -ENOMEM;
92425 - goto free_init;
92426 + goto free_init_rx;
92427 }
92428 #endif
92429 /* Now we've moved module, initialize linked lists, etc. */
92430 @@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
92431 goto free_unload;
92432
92433 /* Set up license info based on the info section */
92434 - set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
92435 + set_license(mod, license);
92436
92437 /*
92438 * ndiswrapper is under GPL by itself, but loads proprietary modules.
92439 @@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
92440 /* Set up MODINFO_ATTR fields */
92441 setup_modinfo(mod, sechdrs, infoindex);
92442
92443 + mod->args = args;
92444 +
92445 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
92446 + {
92447 + char *p, *p2;
92448 +
92449 + if (strstr(mod->args, "grsec_modharden_netdev")) {
92450 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
92451 + err = -EPERM;
92452 + goto cleanup;
92453 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
92454 + p += strlen("grsec_modharden_normal");
92455 + p2 = strstr(p, "_");
92456 + if (p2) {
92457 + *p2 = '\0';
92458 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
92459 + *p2 = '_';
92460 + }
92461 + err = -EPERM;
92462 + goto cleanup;
92463 + }
92464 + }
92465 +#endif
92466 +
92467 +
92468 /* Fix up syms, so that st_value is a pointer to location. */
92469 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
92470 mod);
92471 @@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
92472
92473 /* Now do relocations. */
92474 for (i = 1; i < hdr->e_shnum; i++) {
92475 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
92476 unsigned int info = sechdrs[i].sh_info;
92477 + strtab = (char *)sechdrs[strindex].sh_addr;
92478
92479 /* Not a valid relocation section? */
92480 if (info >= hdr->e_shnum)
92481 @@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
92482 * Do it before processing of module parameters, so the module
92483 * can provide parameter accessor functions of its own.
92484 */
92485 - if (mod->module_init)
92486 - flush_icache_range((unsigned long)mod->module_init,
92487 - (unsigned long)mod->module_init
92488 - + mod->init_size);
92489 - flush_icache_range((unsigned long)mod->module_core,
92490 - (unsigned long)mod->module_core + mod->core_size);
92491 + if (mod->module_init_rx)
92492 + flush_icache_range((unsigned long)mod->module_init_rx,
92493 + (unsigned long)mod->module_init_rx
92494 + + mod->init_size_rx);
92495 + flush_icache_range((unsigned long)mod->module_core_rx,
92496 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
92497
92498 set_fs(old_fs);
92499
92500 - mod->args = args;
92501 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
92502 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
92503 mod->name);
92504 @@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
92505 free_unload:
92506 module_unload_free(mod);
92507 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
92508 + free_init_rx:
92509 percpu_modfree(mod->refptr);
92510 - free_init:
92511 #endif
92512 - module_free(mod, mod->module_init);
92513 - free_core:
92514 - module_free(mod, mod->module_core);
92515 + module_free_exec(mod, mod->module_init_rx);
92516 + free_core_rx:
92517 + module_free_exec(mod, mod->module_core_rx);
92518 + free_init_rw:
92519 + module_free(mod, mod->module_init_rw);
92520 + free_core_rw:
92521 + module_free(mod, mod->module_core_rw);
92522 /* mod will be freed with core. Don't access it beyond this line! */
92523 free_percpu:
92524 if (percpu)
92525 @@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
92526 mod->symtab = mod->core_symtab;
92527 mod->strtab = mod->core_strtab;
92528 #endif
92529 - module_free(mod, mod->module_init);
92530 - mod->module_init = NULL;
92531 - mod->init_size = 0;
92532 - mod->init_text_size = 0;
92533 + module_free(mod, mod->module_init_rw);
92534 + module_free_exec(mod, mod->module_init_rx);
92535 + mod->module_init_rw = NULL;
92536 + mod->module_init_rx = NULL;
92537 + mod->init_size_rw = 0;
92538 + mod->init_size_rx = 0;
92539 mutex_unlock(&module_mutex);
92540
92541 return 0;
92542 @@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
92543 unsigned long nextval;
92544
92545 /* At worse, next value is at end of module */
92546 - if (within_module_init(addr, mod))
92547 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
92548 + if (within_module_init_rx(addr, mod))
92549 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
92550 + else if (within_module_init_rw(addr, mod))
92551 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
92552 + else if (within_module_core_rx(addr, mod))
92553 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
92554 + else if (within_module_core_rw(addr, mod))
92555 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
92556 else
92557 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
92558 + return NULL;
92559
92560 /* Scan for closest preceeding symbol, and next symbol. (ELF
92561 starts real symbols at 1). */
92562 @@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
92563 char buf[8];
92564
92565 seq_printf(m, "%s %u",
92566 - mod->name, mod->init_size + mod->core_size);
92567 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
92568 print_unload_info(m, mod);
92569
92570 /* Informative for users. */
92571 @@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
92572 mod->state == MODULE_STATE_COMING ? "Loading":
92573 "Live");
92574 /* Used by oprofile and other similar tools. */
92575 - seq_printf(m, " 0x%p", mod->module_core);
92576 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
92577
92578 /* Taints info */
92579 if (mod->taints)
92580 @@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
92581
92582 static int __init proc_modules_init(void)
92583 {
92584 +#ifndef CONFIG_GRKERNSEC_HIDESYM
92585 +#ifdef CONFIG_GRKERNSEC_PROC_USER
92586 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92587 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92588 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
92589 +#else
92590 proc_create("modules", 0, NULL, &proc_modules_operations);
92591 +#endif
92592 +#else
92593 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92594 +#endif
92595 return 0;
92596 }
92597 module_init(proc_modules_init);
92598 @@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
92599 {
92600 struct module *mod;
92601
92602 - if (addr < module_addr_min || addr > module_addr_max)
92603 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
92604 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
92605 return NULL;
92606
92607 list_for_each_entry_rcu(mod, &modules, list)
92608 - if (within_module_core(addr, mod)
92609 - || within_module_init(addr, mod))
92610 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
92611 return mod;
92612 return NULL;
92613 }
92614 @@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
92615 */
92616 struct module *__module_text_address(unsigned long addr)
92617 {
92618 - struct module *mod = __module_address(addr);
92619 + struct module *mod;
92620 +
92621 +#ifdef CONFIG_X86_32
92622 + addr = ktla_ktva(addr);
92623 +#endif
92624 +
92625 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
92626 + return NULL;
92627 +
92628 + mod = __module_address(addr);
92629 +
92630 if (mod) {
92631 /* Make sure it's within the text section. */
92632 - if (!within(addr, mod->module_init, mod->init_text_size)
92633 - && !within(addr, mod->module_core, mod->core_text_size))
92634 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
92635 mod = NULL;
92636 }
92637 return mod;
92638 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
92639 index ec815a9..fe46e99 100644
92640 --- a/kernel/mutex-debug.c
92641 +++ b/kernel/mutex-debug.c
92642 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
92643 }
92644
92645 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92646 - struct thread_info *ti)
92647 + struct task_struct *task)
92648 {
92649 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
92650
92651 /* Mark the current thread as blocked on the lock: */
92652 - ti->task->blocked_on = waiter;
92653 + task->blocked_on = waiter;
92654 }
92655
92656 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92657 - struct thread_info *ti)
92658 + struct task_struct *task)
92659 {
92660 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
92661 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
92662 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
92663 - ti->task->blocked_on = NULL;
92664 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
92665 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
92666 + task->blocked_on = NULL;
92667
92668 list_del_init(&waiter->list);
92669 waiter->task = NULL;
92670 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
92671 return;
92672
92673 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
92674 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
92675 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
92676 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
92677 mutex_clear_owner(lock);
92678 }
92679 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
92680 index 6b2d735..372d3c4 100644
92681 --- a/kernel/mutex-debug.h
92682 +++ b/kernel/mutex-debug.h
92683 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
92684 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
92685 extern void debug_mutex_add_waiter(struct mutex *lock,
92686 struct mutex_waiter *waiter,
92687 - struct thread_info *ti);
92688 + struct task_struct *task);
92689 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92690 - struct thread_info *ti);
92691 + struct task_struct *task);
92692 extern void debug_mutex_unlock(struct mutex *lock);
92693 extern void debug_mutex_init(struct mutex *lock, const char *name,
92694 struct lock_class_key *key);
92695
92696 static inline void mutex_set_owner(struct mutex *lock)
92697 {
92698 - lock->owner = current_thread_info();
92699 + lock->owner = current;
92700 }
92701
92702 static inline void mutex_clear_owner(struct mutex *lock)
92703 diff --git a/kernel/mutex.c b/kernel/mutex.c
92704 index f85644c..5ee9f77 100644
92705 --- a/kernel/mutex.c
92706 +++ b/kernel/mutex.c
92707 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92708 */
92709
92710 for (;;) {
92711 - struct thread_info *owner;
92712 + struct task_struct *owner;
92713
92714 /*
92715 * If we own the BKL, then don't spin. The owner of
92716 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92717 spin_lock_mutex(&lock->wait_lock, flags);
92718
92719 debug_mutex_lock_common(lock, &waiter);
92720 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
92721 + debug_mutex_add_waiter(lock, &waiter, task);
92722
92723 /* add waiting tasks to the end of the waitqueue (FIFO): */
92724 list_add_tail(&waiter.list, &lock->wait_list);
92725 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92726 * TASK_UNINTERRUPTIBLE case.)
92727 */
92728 if (unlikely(signal_pending_state(state, task))) {
92729 - mutex_remove_waiter(lock, &waiter,
92730 - task_thread_info(task));
92731 + mutex_remove_waiter(lock, &waiter, task);
92732 mutex_release(&lock->dep_map, 1, ip);
92733 spin_unlock_mutex(&lock->wait_lock, flags);
92734
92735 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92736 done:
92737 lock_acquired(&lock->dep_map, ip);
92738 /* got the lock - rejoice! */
92739 - mutex_remove_waiter(lock, &waiter, current_thread_info());
92740 + mutex_remove_waiter(lock, &waiter, task);
92741 mutex_set_owner(lock);
92742
92743 /* set it to 0 if there are no waiters left: */
92744 diff --git a/kernel/mutex.h b/kernel/mutex.h
92745 index 67578ca..4115fbf 100644
92746 --- a/kernel/mutex.h
92747 +++ b/kernel/mutex.h
92748 @@ -19,7 +19,7 @@
92749 #ifdef CONFIG_SMP
92750 static inline void mutex_set_owner(struct mutex *lock)
92751 {
92752 - lock->owner = current_thread_info();
92753 + lock->owner = current;
92754 }
92755
92756 static inline void mutex_clear_owner(struct mutex *lock)
92757 diff --git a/kernel/panic.c b/kernel/panic.c
92758 index 96b45d0..ff70a46 100644
92759 --- a/kernel/panic.c
92760 +++ b/kernel/panic.c
92761 @@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
92762 va_end(args);
92763 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
92764 #ifdef CONFIG_DEBUG_BUGVERBOSE
92765 - dump_stack();
92766 + /*
92767 + * Avoid nested stack-dumping if a panic occurs during oops processing
92768 + */
92769 + if (!oops_in_progress)
92770 + dump_stack();
92771 #endif
92772
92773 /*
92774 @@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
92775 const char *board;
92776
92777 printk(KERN_WARNING "------------[ cut here ]------------\n");
92778 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
92779 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
92780 board = dmi_get_system_info(DMI_PRODUCT_NAME);
92781 if (board)
92782 printk(KERN_WARNING "Hardware name: %s\n", board);
92783 @@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
92784 */
92785 void __stack_chk_fail(void)
92786 {
92787 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
92788 + dump_stack();
92789 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
92790 __builtin_return_address(0));
92791 }
92792 EXPORT_SYMBOL(__stack_chk_fail);
92793 diff --git a/kernel/params.c b/kernel/params.c
92794 index d656c27..21e452c 100644
92795 --- a/kernel/params.c
92796 +++ b/kernel/params.c
92797 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
92798 return ret;
92799 }
92800
92801 -static struct sysfs_ops module_sysfs_ops = {
92802 +static const struct sysfs_ops module_sysfs_ops = {
92803 .show = module_attr_show,
92804 .store = module_attr_store,
92805 };
92806 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
92807 return 0;
92808 }
92809
92810 -static struct kset_uevent_ops module_uevent_ops = {
92811 +static const struct kset_uevent_ops module_uevent_ops = {
92812 .filter = uevent_filter,
92813 };
92814
92815 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
92816 index 37ebc14..9c121d9 100644
92817 --- a/kernel/perf_event.c
92818 +++ b/kernel/perf_event.c
92819 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
92820 */
92821 int sysctl_perf_event_sample_rate __read_mostly = 100000;
92822
92823 -static atomic64_t perf_event_id;
92824 +static atomic64_unchecked_t perf_event_id;
92825
92826 /*
92827 * Lock for (sysadmin-configurable) event reservations:
92828 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
92829 * In order to keep per-task stats reliable we need to flip the event
92830 * values when we flip the contexts.
92831 */
92832 - value = atomic64_read(&next_event->count);
92833 - value = atomic64_xchg(&event->count, value);
92834 - atomic64_set(&next_event->count, value);
92835 + value = atomic64_read_unchecked(&next_event->count);
92836 + value = atomic64_xchg_unchecked(&event->count, value);
92837 + atomic64_set_unchecked(&next_event->count, value);
92838
92839 swap(event->total_time_enabled, next_event->total_time_enabled);
92840 swap(event->total_time_running, next_event->total_time_running);
92841 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
92842 update_event_times(event);
92843 }
92844
92845 - return atomic64_read(&event->count);
92846 + return atomic64_read_unchecked(&event->count);
92847 }
92848
92849 /*
92850 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
92851 values[n++] = 1 + leader->nr_siblings;
92852 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
92853 values[n++] = leader->total_time_enabled +
92854 - atomic64_read(&leader->child_total_time_enabled);
92855 + atomic64_read_unchecked(&leader->child_total_time_enabled);
92856 }
92857 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
92858 values[n++] = leader->total_time_running +
92859 - atomic64_read(&leader->child_total_time_running);
92860 + atomic64_read_unchecked(&leader->child_total_time_running);
92861 }
92862
92863 size = n * sizeof(u64);
92864 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
92865 values[n++] = perf_event_read_value(event);
92866 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
92867 values[n++] = event->total_time_enabled +
92868 - atomic64_read(&event->child_total_time_enabled);
92869 + atomic64_read_unchecked(&event->child_total_time_enabled);
92870 }
92871 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
92872 values[n++] = event->total_time_running +
92873 - atomic64_read(&event->child_total_time_running);
92874 + atomic64_read_unchecked(&event->child_total_time_running);
92875 }
92876 if (read_format & PERF_FORMAT_ID)
92877 values[n++] = primary_event_id(event);
92878 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
92879 static void perf_event_reset(struct perf_event *event)
92880 {
92881 (void)perf_event_read(event);
92882 - atomic64_set(&event->count, 0);
92883 + atomic64_set_unchecked(&event->count, 0);
92884 perf_event_update_userpage(event);
92885 }
92886
92887 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
92888 ++userpg->lock;
92889 barrier();
92890 userpg->index = perf_event_index(event);
92891 - userpg->offset = atomic64_read(&event->count);
92892 + userpg->offset = atomic64_read_unchecked(&event->count);
92893 if (event->state == PERF_EVENT_STATE_ACTIVE)
92894 - userpg->offset -= atomic64_read(&event->hw.prev_count);
92895 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
92896
92897 userpg->time_enabled = event->total_time_enabled +
92898 - atomic64_read(&event->child_total_time_enabled);
92899 + atomic64_read_unchecked(&event->child_total_time_enabled);
92900
92901 userpg->time_running = event->total_time_running +
92902 - atomic64_read(&event->child_total_time_running);
92903 + atomic64_read_unchecked(&event->child_total_time_running);
92904
92905 barrier();
92906 ++userpg->lock;
92907 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
92908 u64 values[4];
92909 int n = 0;
92910
92911 - values[n++] = atomic64_read(&event->count);
92912 + values[n++] = atomic64_read_unchecked(&event->count);
92913 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
92914 values[n++] = event->total_time_enabled +
92915 - atomic64_read(&event->child_total_time_enabled);
92916 + atomic64_read_unchecked(&event->child_total_time_enabled);
92917 }
92918 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
92919 values[n++] = event->total_time_running +
92920 - atomic64_read(&event->child_total_time_running);
92921 + atomic64_read_unchecked(&event->child_total_time_running);
92922 }
92923 if (read_format & PERF_FORMAT_ID)
92924 values[n++] = primary_event_id(event);
92925 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
92926 if (leader != event)
92927 leader->pmu->read(leader);
92928
92929 - values[n++] = atomic64_read(&leader->count);
92930 + values[n++] = atomic64_read_unchecked(&leader->count);
92931 if (read_format & PERF_FORMAT_ID)
92932 values[n++] = primary_event_id(leader);
92933
92934 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
92935 if (sub != event)
92936 sub->pmu->read(sub);
92937
92938 - values[n++] = atomic64_read(&sub->count);
92939 + values[n++] = atomic64_read_unchecked(&sub->count);
92940 if (read_format & PERF_FORMAT_ID)
92941 values[n++] = primary_event_id(sub);
92942
92943 @@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
92944 * need to add enough zero bytes after the string to handle
92945 * the 64bit alignment we do later.
92946 */
92947 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
92948 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
92949 if (!buf) {
92950 name = strncpy(tmp, "//enomem", sizeof(tmp));
92951 goto got_name;
92952 }
92953 - name = d_path(&file->f_path, buf, PATH_MAX);
92954 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
92955 if (IS_ERR(name)) {
92956 name = strncpy(tmp, "//toolong", sizeof(tmp));
92957 goto got_name;
92958 @@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
92959 {
92960 struct hw_perf_event *hwc = &event->hw;
92961
92962 - atomic64_add(nr, &event->count);
92963 + atomic64_add_unchecked(nr, &event->count);
92964
92965 if (!hwc->sample_period)
92966 return;
92967 @@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
92968 u64 now;
92969
92970 now = cpu_clock(cpu);
92971 - prev = atomic64_read(&event->hw.prev_count);
92972 - atomic64_set(&event->hw.prev_count, now);
92973 - atomic64_add(now - prev, &event->count);
92974 + prev = atomic64_read_unchecked(&event->hw.prev_count);
92975 + atomic64_set_unchecked(&event->hw.prev_count, now);
92976 + atomic64_add_unchecked(now - prev, &event->count);
92977 }
92978
92979 static int cpu_clock_perf_event_enable(struct perf_event *event)
92980 @@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
92981 struct hw_perf_event *hwc = &event->hw;
92982 int cpu = raw_smp_processor_id();
92983
92984 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
92985 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
92986 perf_swevent_start_hrtimer(event);
92987
92988 return 0;
92989 @@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
92990 u64 prev;
92991 s64 delta;
92992
92993 - prev = atomic64_xchg(&event->hw.prev_count, now);
92994 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
92995 delta = now - prev;
92996 - atomic64_add(delta, &event->count);
92997 + atomic64_add_unchecked(delta, &event->count);
92998 }
92999
93000 static int task_clock_perf_event_enable(struct perf_event *event)
93001 @@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
93002
93003 now = event->ctx->time;
93004
93005 - atomic64_set(&hwc->prev_count, now);
93006 + atomic64_set_unchecked(&hwc->prev_count, now);
93007
93008 perf_swevent_start_hrtimer(event);
93009
93010 @@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
93011 event->parent = parent_event;
93012
93013 event->ns = get_pid_ns(current->nsproxy->pid_ns);
93014 - event->id = atomic64_inc_return(&perf_event_id);
93015 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
93016
93017 event->state = PERF_EVENT_STATE_INACTIVE;
93018
93019 @@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
93020 if (child_event->attr.inherit_stat)
93021 perf_event_read_event(child_event, child);
93022
93023 - child_val = atomic64_read(&child_event->count);
93024 + child_val = atomic64_read_unchecked(&child_event->count);
93025
93026 /*
93027 * Add back the child's count to the parent's count:
93028 */
93029 - atomic64_add(child_val, &parent_event->count);
93030 - atomic64_add(child_event->total_time_enabled,
93031 + atomic64_add_unchecked(child_val, &parent_event->count);
93032 + atomic64_add_unchecked(child_event->total_time_enabled,
93033 &parent_event->child_total_time_enabled);
93034 - atomic64_add(child_event->total_time_running,
93035 + atomic64_add_unchecked(child_event->total_time_running,
93036 &parent_event->child_total_time_running);
93037
93038 /*
93039 diff --git a/kernel/pid.c b/kernel/pid.c
93040 index fce7198..4f23a7e 100644
93041 --- a/kernel/pid.c
93042 +++ b/kernel/pid.c
93043 @@ -33,6 +33,7 @@
93044 #include <linux/rculist.h>
93045 #include <linux/bootmem.h>
93046 #include <linux/hash.h>
93047 +#include <linux/security.h>
93048 #include <linux/pid_namespace.h>
93049 #include <linux/init_task.h>
93050 #include <linux/syscalls.h>
93051 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
93052
93053 int pid_max = PID_MAX_DEFAULT;
93054
93055 -#define RESERVED_PIDS 300
93056 +#define RESERVED_PIDS 500
93057
93058 int pid_max_min = RESERVED_PIDS + 1;
93059 int pid_max_max = PID_MAX_LIMIT;
93060 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
93061 */
93062 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
93063 {
93064 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
93065 + struct task_struct *task;
93066 +
93067 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
93068 +
93069 + if (gr_pid_is_chrooted(task))
93070 + return NULL;
93071 +
93072 + return task;
93073 }
93074
93075 struct task_struct *find_task_by_vpid(pid_t vnr)
93076 @@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
93077 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
93078 }
93079
93080 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
93081 +{
93082 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
93083 +}
93084 +
93085 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
93086 {
93087 struct pid *pid;
93088 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
93089 index 5c9dc22..d271117 100644
93090 --- a/kernel/posix-cpu-timers.c
93091 +++ b/kernel/posix-cpu-timers.c
93092 @@ -6,6 +6,7 @@
93093 #include <linux/posix-timers.h>
93094 #include <linux/errno.h>
93095 #include <linux/math64.h>
93096 +#include <linux/security.h>
93097 #include <asm/uaccess.h>
93098 #include <linux/kernel_stat.h>
93099 #include <trace/events/timer.h>
93100 @@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
93101
93102 static __init int init_posix_cpu_timers(void)
93103 {
93104 - struct k_clock process = {
93105 + static struct k_clock process = {
93106 .clock_getres = process_cpu_clock_getres,
93107 .clock_get = process_cpu_clock_get,
93108 .clock_set = do_posix_clock_nosettime,
93109 @@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
93110 .nsleep = process_cpu_nsleep,
93111 .nsleep_restart = process_cpu_nsleep_restart,
93112 };
93113 - struct k_clock thread = {
93114 + static struct k_clock thread = {
93115 .clock_getres = thread_cpu_clock_getres,
93116 .clock_get = thread_cpu_clock_get,
93117 .clock_set = do_posix_clock_nosettime,
93118 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
93119 index 5e76d22..cf1baeb 100644
93120 --- a/kernel/posix-timers.c
93121 +++ b/kernel/posix-timers.c
93122 @@ -42,6 +42,7 @@
93123 #include <linux/compiler.h>
93124 #include <linux/idr.h>
93125 #include <linux/posix-timers.h>
93126 +#include <linux/grsecurity.h>
93127 #include <linux/syscalls.h>
93128 #include <linux/wait.h>
93129 #include <linux/workqueue.h>
93130 @@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
93131 * which we beg off on and pass to do_sys_settimeofday().
93132 */
93133
93134 -static struct k_clock posix_clocks[MAX_CLOCKS];
93135 +static struct k_clock *posix_clocks[MAX_CLOCKS];
93136
93137 /*
93138 * These ones are defined below.
93139 @@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
93140 */
93141 #define CLOCK_DISPATCH(clock, call, arglist) \
93142 ((clock) < 0 ? posix_cpu_##call arglist : \
93143 - (posix_clocks[clock].call != NULL \
93144 - ? (*posix_clocks[clock].call) arglist : common_##call arglist))
93145 + (posix_clocks[clock]->call != NULL \
93146 + ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
93147
93148 /*
93149 * Default clock hook functions when the struct k_clock passed
93150 @@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
93151 struct timespec *tp)
93152 {
93153 tp->tv_sec = 0;
93154 - tp->tv_nsec = posix_clocks[which_clock].res;
93155 + tp->tv_nsec = posix_clocks[which_clock]->res;
93156 return 0;
93157 }
93158
93159 @@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
93160 return 0;
93161 if ((unsigned) which_clock >= MAX_CLOCKS)
93162 return 1;
93163 - if (posix_clocks[which_clock].clock_getres != NULL)
93164 + if (posix_clocks[which_clock] == NULL)
93165 return 0;
93166 - if (posix_clocks[which_clock].res != 0)
93167 + if (posix_clocks[which_clock]->clock_getres != NULL)
93168 + return 0;
93169 + if (posix_clocks[which_clock]->res != 0)
93170 return 0;
93171 return 1;
93172 }
93173 @@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
93174 */
93175 static __init int init_posix_timers(void)
93176 {
93177 - struct k_clock clock_realtime = {
93178 + static struct k_clock clock_realtime = {
93179 .clock_getres = hrtimer_get_res,
93180 };
93181 - struct k_clock clock_monotonic = {
93182 + static struct k_clock clock_monotonic = {
93183 .clock_getres = hrtimer_get_res,
93184 .clock_get = posix_ktime_get_ts,
93185 .clock_set = do_posix_clock_nosettime,
93186 };
93187 - struct k_clock clock_monotonic_raw = {
93188 + static struct k_clock clock_monotonic_raw = {
93189 .clock_getres = hrtimer_get_res,
93190 .clock_get = posix_get_monotonic_raw,
93191 .clock_set = do_posix_clock_nosettime,
93192 .timer_create = no_timer_create,
93193 .nsleep = no_nsleep,
93194 };
93195 - struct k_clock clock_realtime_coarse = {
93196 + static struct k_clock clock_realtime_coarse = {
93197 .clock_getres = posix_get_coarse_res,
93198 .clock_get = posix_get_realtime_coarse,
93199 .clock_set = do_posix_clock_nosettime,
93200 .timer_create = no_timer_create,
93201 .nsleep = no_nsleep,
93202 };
93203 - struct k_clock clock_monotonic_coarse = {
93204 + static struct k_clock clock_monotonic_coarse = {
93205 .clock_getres = posix_get_coarse_res,
93206 .clock_get = posix_get_monotonic_coarse,
93207 .clock_set = do_posix_clock_nosettime,
93208 @@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
93209 .nsleep = no_nsleep,
93210 };
93211
93212 + pax_track_stack();
93213 +
93214 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
93215 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
93216 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
93217 @@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
93218 return;
93219 }
93220
93221 - posix_clocks[clock_id] = *new_clock;
93222 + posix_clocks[clock_id] = new_clock;
93223 }
93224 EXPORT_SYMBOL_GPL(register_posix_clock);
93225
93226 @@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93227 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93228 return -EFAULT;
93229
93230 + /* only the CLOCK_REALTIME clock can be set, all other clocks
93231 + have their clock_set fptr set to a nosettime dummy function
93232 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
93233 + call common_clock_set, which calls do_sys_settimeofday, which
93234 + we hook
93235 + */
93236 +
93237 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
93238 }
93239
93240 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
93241 index 04a9e90..bc355aa 100644
93242 --- a/kernel/power/hibernate.c
93243 +++ b/kernel/power/hibernate.c
93244 @@ -48,14 +48,14 @@ enum {
93245
93246 static int hibernation_mode = HIBERNATION_SHUTDOWN;
93247
93248 -static struct platform_hibernation_ops *hibernation_ops;
93249 +static const struct platform_hibernation_ops *hibernation_ops;
93250
93251 /**
93252 * hibernation_set_ops - set the global hibernate operations
93253 * @ops: the hibernation operations to use in subsequent hibernation transitions
93254 */
93255
93256 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
93257 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
93258 {
93259 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
93260 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
93261 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
93262 index e8b3370..484c2e4 100644
93263 --- a/kernel/power/poweroff.c
93264 +++ b/kernel/power/poweroff.c
93265 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
93266 .enable_mask = SYSRQ_ENABLE_BOOT,
93267 };
93268
93269 -static int pm_sysrq_init(void)
93270 +static int __init pm_sysrq_init(void)
93271 {
93272 register_sysrq_key('o', &sysrq_poweroff_op);
93273 return 0;
93274 diff --git a/kernel/power/process.c b/kernel/power/process.c
93275 index e7cd671..56d5f459 100644
93276 --- a/kernel/power/process.c
93277 +++ b/kernel/power/process.c
93278 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
93279 struct timeval start, end;
93280 u64 elapsed_csecs64;
93281 unsigned int elapsed_csecs;
93282 + bool timedout = false;
93283
93284 do_gettimeofday(&start);
93285
93286 end_time = jiffies + TIMEOUT;
93287 do {
93288 todo = 0;
93289 + if (time_after(jiffies, end_time))
93290 + timedout = true;
93291 read_lock(&tasklist_lock);
93292 do_each_thread(g, p) {
93293 if (frozen(p) || !freezeable(p))
93294 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
93295 * It is "frozen enough". If the task does wake
93296 * up, it will immediately call try_to_freeze.
93297 */
93298 - if (!task_is_stopped_or_traced(p) &&
93299 - !freezer_should_skip(p))
93300 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
93301 todo++;
93302 + if (timedout) {
93303 + printk(KERN_ERR "Task refusing to freeze:\n");
93304 + sched_show_task(p);
93305 + }
93306 + }
93307 } while_each_thread(g, p);
93308 read_unlock(&tasklist_lock);
93309 yield(); /* Yield is okay here */
93310 - if (time_after(jiffies, end_time))
93311 - break;
93312 - } while (todo);
93313 + } while (todo && !timedout);
93314
93315 do_gettimeofday(&end);
93316 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
93317 diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
93318 index 40dd021..fb30ceb 100644
93319 --- a/kernel/power/suspend.c
93320 +++ b/kernel/power/suspend.c
93321 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
93322 [PM_SUSPEND_MEM] = "mem",
93323 };
93324
93325 -static struct platform_suspend_ops *suspend_ops;
93326 +static const struct platform_suspend_ops *suspend_ops;
93327
93328 /**
93329 * suspend_set_ops - Set the global suspend method table.
93330 * @ops: Pointer to ops structure.
93331 */
93332 -void suspend_set_ops(struct platform_suspend_ops *ops)
93333 +void suspend_set_ops(const struct platform_suspend_ops *ops)
93334 {
93335 mutex_lock(&pm_mutex);
93336 suspend_ops = ops;
93337 diff --git a/kernel/printk.c b/kernel/printk.c
93338 index 4cade47..4d17900 100644
93339 --- a/kernel/printk.c
93340 +++ b/kernel/printk.c
93341 @@ -33,6 +33,7 @@
93342 #include <linux/bootmem.h>
93343 #include <linux/syscalls.h>
93344 #include <linux/kexec.h>
93345 +#include <linux/syslog.h>
93346
93347 #include <asm/uaccess.h>
93348
93349 @@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
93350 }
93351 #endif
93352
93353 -/*
93354 - * Commands to do_syslog:
93355 - *
93356 - * 0 -- Close the log. Currently a NOP.
93357 - * 1 -- Open the log. Currently a NOP.
93358 - * 2 -- Read from the log.
93359 - * 3 -- Read all messages remaining in the ring buffer.
93360 - * 4 -- Read and clear all messages remaining in the ring buffer
93361 - * 5 -- Clear ring buffer.
93362 - * 6 -- Disable printk's to console
93363 - * 7 -- Enable printk's to console
93364 - * 8 -- Set level of messages printed to console
93365 - * 9 -- Return number of unread characters in the log buffer
93366 - * 10 -- Return size of the log buffer
93367 - */
93368 -int do_syslog(int type, char __user *buf, int len)
93369 +int do_syslog(int type, char __user *buf, int len, bool from_file)
93370 {
93371 unsigned i, j, limit, count;
93372 int do_clear = 0;
93373 char c;
93374 int error = 0;
93375
93376 - error = security_syslog(type);
93377 +#ifdef CONFIG_GRKERNSEC_DMESG
93378 + if (grsec_enable_dmesg &&
93379 + (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
93380 + !capable(CAP_SYS_ADMIN))
93381 + return -EPERM;
93382 +#endif
93383 +
93384 + error = security_syslog(type, from_file);
93385 if (error)
93386 return error;
93387
93388 switch (type) {
93389 - case 0: /* Close log */
93390 + case SYSLOG_ACTION_CLOSE: /* Close log */
93391 break;
93392 - case 1: /* Open log */
93393 + case SYSLOG_ACTION_OPEN: /* Open log */
93394 break;
93395 - case 2: /* Read from log */
93396 + case SYSLOG_ACTION_READ: /* Read from log */
93397 error = -EINVAL;
93398 if (!buf || len < 0)
93399 goto out;
93400 @@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
93401 if (!error)
93402 error = i;
93403 break;
93404 - case 4: /* Read/clear last kernel messages */
93405 + /* Read/clear last kernel messages */
93406 + case SYSLOG_ACTION_READ_CLEAR:
93407 do_clear = 1;
93408 /* FALL THRU */
93409 - case 3: /* Read last kernel messages */
93410 + /* Read last kernel messages */
93411 + case SYSLOG_ACTION_READ_ALL:
93412 error = -EINVAL;
93413 if (!buf || len < 0)
93414 goto out;
93415 @@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
93416 }
93417 }
93418 break;
93419 - case 5: /* Clear ring buffer */
93420 + /* Clear ring buffer */
93421 + case SYSLOG_ACTION_CLEAR:
93422 logged_chars = 0;
93423 break;
93424 - case 6: /* Disable logging to console */
93425 + /* Disable logging to console */
93426 + case SYSLOG_ACTION_CONSOLE_OFF:
93427 if (saved_console_loglevel == -1)
93428 saved_console_loglevel = console_loglevel;
93429 console_loglevel = minimum_console_loglevel;
93430 break;
93431 - case 7: /* Enable logging to console */
93432 + /* Enable logging to console */
93433 + case SYSLOG_ACTION_CONSOLE_ON:
93434 if (saved_console_loglevel != -1) {
93435 console_loglevel = saved_console_loglevel;
93436 saved_console_loglevel = -1;
93437 }
93438 break;
93439 - case 8: /* Set level of messages printed to console */
93440 + /* Set level of messages printed to console */
93441 + case SYSLOG_ACTION_CONSOLE_LEVEL:
93442 error = -EINVAL;
93443 if (len < 1 || len > 8)
93444 goto out;
93445 @@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
93446 saved_console_loglevel = -1;
93447 error = 0;
93448 break;
93449 - case 9: /* Number of chars in the log buffer */
93450 + /* Number of chars in the log buffer */
93451 + case SYSLOG_ACTION_SIZE_UNREAD:
93452 error = log_end - log_start;
93453 break;
93454 - case 10: /* Size of the log buffer */
93455 + /* Size of the log buffer */
93456 + case SYSLOG_ACTION_SIZE_BUFFER:
93457 error = log_buf_len;
93458 break;
93459 default:
93460 @@ -415,7 +416,7 @@ out:
93461
93462 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
93463 {
93464 - return do_syslog(type, buf, len);
93465 + return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
93466 }
93467
93468 /*
93469 diff --git a/kernel/profile.c b/kernel/profile.c
93470 index dfadc5b..7f59404 100644
93471 --- a/kernel/profile.c
93472 +++ b/kernel/profile.c
93473 @@ -39,7 +39,7 @@ struct profile_hit {
93474 /* Oprofile timer tick hook */
93475 static int (*timer_hook)(struct pt_regs *) __read_mostly;
93476
93477 -static atomic_t *prof_buffer;
93478 +static atomic_unchecked_t *prof_buffer;
93479 static unsigned long prof_len, prof_shift;
93480
93481 int prof_on __read_mostly;
93482 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
93483 hits[i].pc = 0;
93484 continue;
93485 }
93486 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93487 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93488 hits[i].hits = hits[i].pc = 0;
93489 }
93490 }
93491 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
93492 * Add the current hit(s) and flush the write-queue out
93493 * to the global buffer:
93494 */
93495 - atomic_add(nr_hits, &prof_buffer[pc]);
93496 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
93497 for (i = 0; i < NR_PROFILE_HIT; ++i) {
93498 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93499 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93500 hits[i].pc = hits[i].hits = 0;
93501 }
93502 out:
93503 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
93504 if (prof_on != type || !prof_buffer)
93505 return;
93506 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
93507 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93508 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93509 }
93510 #endif /* !CONFIG_SMP */
93511 EXPORT_SYMBOL_GPL(profile_hits);
93512 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
93513 return -EFAULT;
93514 buf++; p++; count--; read++;
93515 }
93516 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
93517 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
93518 if (copy_to_user(buf, (void *)pnt, count))
93519 return -EFAULT;
93520 read += count;
93521 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
93522 }
93523 #endif
93524 profile_discard_flip_buffers();
93525 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
93526 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
93527 return count;
93528 }
93529
93530 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
93531 index 05625f6..733bf70 100644
93532 --- a/kernel/ptrace.c
93533 +++ b/kernel/ptrace.c
93534 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
93535 return ret;
93536 }
93537
93538 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
93539 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
93540 + unsigned int log)
93541 {
93542 const struct cred *cred = current_cred(), *tcred;
93543
93544 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
93545 cred->gid != tcred->egid ||
93546 cred->gid != tcred->sgid ||
93547 cred->gid != tcred->gid) &&
93548 - !capable(CAP_SYS_PTRACE)) {
93549 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
93550 + (log && !capable(CAP_SYS_PTRACE)))
93551 + ) {
93552 rcu_read_unlock();
93553 return -EPERM;
93554 }
93555 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
93556 smp_rmb();
93557 if (task->mm)
93558 dumpable = get_dumpable(task->mm);
93559 - if (!dumpable && !capable(CAP_SYS_PTRACE))
93560 + if (!dumpable &&
93561 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
93562 + (log && !capable(CAP_SYS_PTRACE))))
93563 return -EPERM;
93564
93565 return security_ptrace_access_check(task, mode);
93566 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
93567 {
93568 int err;
93569 task_lock(task);
93570 - err = __ptrace_may_access(task, mode);
93571 + err = __ptrace_may_access(task, mode, 0);
93572 + task_unlock(task);
93573 + return !err;
93574 +}
93575 +
93576 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
93577 +{
93578 + int err;
93579 + task_lock(task);
93580 + err = __ptrace_may_access(task, mode, 1);
93581 task_unlock(task);
93582 return !err;
93583 }
93584 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
93585 goto out;
93586
93587 task_lock(task);
93588 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
93589 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
93590 task_unlock(task);
93591 if (retval)
93592 goto unlock_creds;
93593 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
93594 goto unlock_tasklist;
93595
93596 task->ptrace = PT_PTRACED;
93597 - if (capable(CAP_SYS_PTRACE))
93598 + if (capable_nolog(CAP_SYS_PTRACE))
93599 task->ptrace |= PT_PTRACE_CAP;
93600
93601 __ptrace_link(task, current);
93602 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
93603 {
93604 int copied = 0;
93605
93606 + pax_track_stack();
93607 +
93608 while (len > 0) {
93609 char buf[128];
93610 int this_len, retval;
93611 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
93612 {
93613 int copied = 0;
93614
93615 + pax_track_stack();
93616 +
93617 while (len > 0) {
93618 char buf[128];
93619 int this_len, retval;
93620 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
93621 int ret = -EIO;
93622 siginfo_t siginfo;
93623
93624 + pax_track_stack();
93625 +
93626 switch (request) {
93627 case PTRACE_PEEKTEXT:
93628 case PTRACE_PEEKDATA:
93629 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
93630 ret = ptrace_setoptions(child, data);
93631 break;
93632 case PTRACE_GETEVENTMSG:
93633 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
93634 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
93635 break;
93636
93637 case PTRACE_GETSIGINFO:
93638 ret = ptrace_getsiginfo(child, &siginfo);
93639 if (!ret)
93640 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
93641 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
93642 &siginfo);
93643 break;
93644
93645 case PTRACE_SETSIGINFO:
93646 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
93647 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
93648 sizeof siginfo))
93649 ret = -EFAULT;
93650 else
93651 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
93652 goto out;
93653 }
93654
93655 + if (gr_handle_ptrace(child, request)) {
93656 + ret = -EPERM;
93657 + goto out_put_task_struct;
93658 + }
93659 +
93660 if (request == PTRACE_ATTACH) {
93661 ret = ptrace_attach(child);
93662 /*
93663 * Some architectures need to do book-keeping after
93664 * a ptrace attach.
93665 */
93666 - if (!ret)
93667 + if (!ret) {
93668 arch_ptrace_attach(child);
93669 + gr_audit_ptrace(child);
93670 + }
93671 goto out_put_task_struct;
93672 }
93673
93674 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
93675 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
93676 if (copied != sizeof(tmp))
93677 return -EIO;
93678 - return put_user(tmp, (unsigned long __user *)data);
93679 + return put_user(tmp, (__force unsigned long __user *)data);
93680 }
93681
93682 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
93683 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
93684 siginfo_t siginfo;
93685 int ret;
93686
93687 + pax_track_stack();
93688 +
93689 switch (request) {
93690 case PTRACE_PEEKTEXT:
93691 case PTRACE_PEEKDATA:
93692 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
93693 goto out;
93694 }
93695
93696 + if (gr_handle_ptrace(child, request)) {
93697 + ret = -EPERM;
93698 + goto out_put_task_struct;
93699 + }
93700 +
93701 if (request == PTRACE_ATTACH) {
93702 ret = ptrace_attach(child);
93703 /*
93704 * Some architectures need to do book-keeping after
93705 * a ptrace attach.
93706 */
93707 - if (!ret)
93708 + if (!ret) {
93709 arch_ptrace_attach(child);
93710 + gr_audit_ptrace(child);
93711 + }
93712 goto out_put_task_struct;
93713 }
93714
93715 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
93716 index 697c0a0..2402696 100644
93717 --- a/kernel/rcutorture.c
93718 +++ b/kernel/rcutorture.c
93719 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
93720 { 0 };
93721 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
93722 { 0 };
93723 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93724 -static atomic_t n_rcu_torture_alloc;
93725 -static atomic_t n_rcu_torture_alloc_fail;
93726 -static atomic_t n_rcu_torture_free;
93727 -static atomic_t n_rcu_torture_mberror;
93728 -static atomic_t n_rcu_torture_error;
93729 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93730 +static atomic_unchecked_t n_rcu_torture_alloc;
93731 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
93732 +static atomic_unchecked_t n_rcu_torture_free;
93733 +static atomic_unchecked_t n_rcu_torture_mberror;
93734 +static atomic_unchecked_t n_rcu_torture_error;
93735 static long n_rcu_torture_timers;
93736 static struct list_head rcu_torture_removed;
93737 static cpumask_var_t shuffle_tmp_mask;
93738 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
93739
93740 spin_lock_bh(&rcu_torture_lock);
93741 if (list_empty(&rcu_torture_freelist)) {
93742 - atomic_inc(&n_rcu_torture_alloc_fail);
93743 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
93744 spin_unlock_bh(&rcu_torture_lock);
93745 return NULL;
93746 }
93747 - atomic_inc(&n_rcu_torture_alloc);
93748 + atomic_inc_unchecked(&n_rcu_torture_alloc);
93749 p = rcu_torture_freelist.next;
93750 list_del_init(p);
93751 spin_unlock_bh(&rcu_torture_lock);
93752 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
93753 static void
93754 rcu_torture_free(struct rcu_torture *p)
93755 {
93756 - atomic_inc(&n_rcu_torture_free);
93757 + atomic_inc_unchecked(&n_rcu_torture_free);
93758 spin_lock_bh(&rcu_torture_lock);
93759 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
93760 spin_unlock_bh(&rcu_torture_lock);
93761 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
93762 i = rp->rtort_pipe_count;
93763 if (i > RCU_TORTURE_PIPE_LEN)
93764 i = RCU_TORTURE_PIPE_LEN;
93765 - atomic_inc(&rcu_torture_wcount[i]);
93766 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
93767 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
93768 rp->rtort_mbtest = 0;
93769 rcu_torture_free(rp);
93770 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
93771 i = rp->rtort_pipe_count;
93772 if (i > RCU_TORTURE_PIPE_LEN)
93773 i = RCU_TORTURE_PIPE_LEN;
93774 - atomic_inc(&rcu_torture_wcount[i]);
93775 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
93776 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
93777 rp->rtort_mbtest = 0;
93778 list_del(&rp->rtort_free);
93779 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
93780 i = old_rp->rtort_pipe_count;
93781 if (i > RCU_TORTURE_PIPE_LEN)
93782 i = RCU_TORTURE_PIPE_LEN;
93783 - atomic_inc(&rcu_torture_wcount[i]);
93784 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
93785 old_rp->rtort_pipe_count++;
93786 cur_ops->deferred_free(old_rp);
93787 }
93788 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
93789 return;
93790 }
93791 if (p->rtort_mbtest == 0)
93792 - atomic_inc(&n_rcu_torture_mberror);
93793 + atomic_inc_unchecked(&n_rcu_torture_mberror);
93794 spin_lock(&rand_lock);
93795 cur_ops->read_delay(&rand);
93796 n_rcu_torture_timers++;
93797 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
93798 continue;
93799 }
93800 if (p->rtort_mbtest == 0)
93801 - atomic_inc(&n_rcu_torture_mberror);
93802 + atomic_inc_unchecked(&n_rcu_torture_mberror);
93803 cur_ops->read_delay(&rand);
93804 preempt_disable();
93805 pipe_count = p->rtort_pipe_count;
93806 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
93807 rcu_torture_current,
93808 rcu_torture_current_version,
93809 list_empty(&rcu_torture_freelist),
93810 - atomic_read(&n_rcu_torture_alloc),
93811 - atomic_read(&n_rcu_torture_alloc_fail),
93812 - atomic_read(&n_rcu_torture_free),
93813 - atomic_read(&n_rcu_torture_mberror),
93814 + atomic_read_unchecked(&n_rcu_torture_alloc),
93815 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
93816 + atomic_read_unchecked(&n_rcu_torture_free),
93817 + atomic_read_unchecked(&n_rcu_torture_mberror),
93818 n_rcu_torture_timers);
93819 - if (atomic_read(&n_rcu_torture_mberror) != 0)
93820 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
93821 cnt += sprintf(&page[cnt], " !!!");
93822 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
93823 if (i > 1) {
93824 cnt += sprintf(&page[cnt], "!!! ");
93825 - atomic_inc(&n_rcu_torture_error);
93826 + atomic_inc_unchecked(&n_rcu_torture_error);
93827 WARN_ON_ONCE(1);
93828 }
93829 cnt += sprintf(&page[cnt], "Reader Pipe: ");
93830 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
93831 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
93832 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93833 cnt += sprintf(&page[cnt], " %d",
93834 - atomic_read(&rcu_torture_wcount[i]));
93835 + atomic_read_unchecked(&rcu_torture_wcount[i]));
93836 }
93837 cnt += sprintf(&page[cnt], "\n");
93838 if (cur_ops->stats)
93839 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
93840
93841 if (cur_ops->cleanup)
93842 cur_ops->cleanup();
93843 - if (atomic_read(&n_rcu_torture_error))
93844 + if (atomic_read_unchecked(&n_rcu_torture_error))
93845 rcu_torture_print_module_parms("End of test: FAILURE");
93846 else
93847 rcu_torture_print_module_parms("End of test: SUCCESS");
93848 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
93849
93850 rcu_torture_current = NULL;
93851 rcu_torture_current_version = 0;
93852 - atomic_set(&n_rcu_torture_alloc, 0);
93853 - atomic_set(&n_rcu_torture_alloc_fail, 0);
93854 - atomic_set(&n_rcu_torture_free, 0);
93855 - atomic_set(&n_rcu_torture_mberror, 0);
93856 - atomic_set(&n_rcu_torture_error, 0);
93857 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
93858 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
93859 + atomic_set_unchecked(&n_rcu_torture_free, 0);
93860 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
93861 + atomic_set_unchecked(&n_rcu_torture_error, 0);
93862 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
93863 - atomic_set(&rcu_torture_wcount[i], 0);
93864 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
93865 for_each_possible_cpu(cpu) {
93866 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93867 per_cpu(rcu_torture_count, cpu)[i] = 0;
93868 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
93869 index 683c4f3..97f54c6 100644
93870 --- a/kernel/rcutree.c
93871 +++ b/kernel/rcutree.c
93872 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
93873 /*
93874 * Do softirq processing for the current CPU.
93875 */
93876 -static void rcu_process_callbacks(struct softirq_action *unused)
93877 +static void rcu_process_callbacks(void)
93878 {
93879 /*
93880 * Memory references from any prior RCU read-side critical sections
93881 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
93882 index c03edf7..ac1b341 100644
93883 --- a/kernel/rcutree_plugin.h
93884 +++ b/kernel/rcutree_plugin.h
93885 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
93886 */
93887 void __rcu_read_lock(void)
93888 {
93889 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
93890 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
93891 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
93892 }
93893 EXPORT_SYMBOL_GPL(__rcu_read_lock);
93894 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
93895 struct task_struct *t = current;
93896
93897 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
93898 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
93899 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
93900 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
93901 rcu_read_unlock_special(t);
93902 }
93903 diff --git a/kernel/relay.c b/kernel/relay.c
93904 index bf343f5..908e9ee 100644
93905 --- a/kernel/relay.c
93906 +++ b/kernel/relay.c
93907 @@ -1228,7 +1228,7 @@ static int subbuf_splice_actor(struct file *in,
93908 unsigned int flags,
93909 int *nonpad_ret)
93910 {
93911 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
93912 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
93913 struct rchan_buf *rbuf = in->private_data;
93914 unsigned int subbuf_size = rbuf->chan->subbuf_size;
93915 uint64_t pos = (uint64_t) *ppos;
93916 @@ -1247,6 +1247,9 @@ static int subbuf_splice_actor(struct file *in,
93917 .ops = &relay_pipe_buf_ops,
93918 .spd_release = relay_page_release,
93919 };
93920 + ssize_t ret;
93921 +
93922 + pax_track_stack();
93923
93924 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
93925 return 0;
93926 diff --git a/kernel/resource.c b/kernel/resource.c
93927 index fb11a58..4e61ae1 100644
93928 --- a/kernel/resource.c
93929 +++ b/kernel/resource.c
93930 @@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
93931
93932 static int __init ioresources_init(void)
93933 {
93934 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
93935 +#ifdef CONFIG_GRKERNSEC_PROC_USER
93936 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
93937 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
93938 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
93939 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
93940 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
93941 +#endif
93942 +#else
93943 proc_create("ioports", 0, NULL, &proc_ioports_operations);
93944 proc_create("iomem", 0, NULL, &proc_iomem_operations);
93945 +#endif
93946 return 0;
93947 }
93948 __initcall(ioresources_init);
93949 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
93950 index a56f629..1fc4989 100644
93951 --- a/kernel/rtmutex-tester.c
93952 +++ b/kernel/rtmutex-tester.c
93953 @@ -21,7 +21,7 @@
93954 #define MAX_RT_TEST_MUTEXES 8
93955
93956 static spinlock_t rttest_lock;
93957 -static atomic_t rttest_event;
93958 +static atomic_unchecked_t rttest_event;
93959
93960 struct test_thread_data {
93961 int opcode;
93962 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93963
93964 case RTTEST_LOCKCONT:
93965 td->mutexes[td->opdata] = 1;
93966 - td->event = atomic_add_return(1, &rttest_event);
93967 + td->event = atomic_add_return_unchecked(1, &rttest_event);
93968 return 0;
93969
93970 case RTTEST_RESET:
93971 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93972 return 0;
93973
93974 case RTTEST_RESETEVENT:
93975 - atomic_set(&rttest_event, 0);
93976 + atomic_set_unchecked(&rttest_event, 0);
93977 return 0;
93978
93979 default:
93980 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93981 return ret;
93982
93983 td->mutexes[id] = 1;
93984 - td->event = atomic_add_return(1, &rttest_event);
93985 + td->event = atomic_add_return_unchecked(1, &rttest_event);
93986 rt_mutex_lock(&mutexes[id]);
93987 - td->event = atomic_add_return(1, &rttest_event);
93988 + td->event = atomic_add_return_unchecked(1, &rttest_event);
93989 td->mutexes[id] = 4;
93990 return 0;
93991
93992 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93993 return ret;
93994
93995 td->mutexes[id] = 1;
93996 - td->event = atomic_add_return(1, &rttest_event);
93997 + td->event = atomic_add_return_unchecked(1, &rttest_event);
93998 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
93999 - td->event = atomic_add_return(1, &rttest_event);
94000 + td->event = atomic_add_return_unchecked(1, &rttest_event);
94001 td->mutexes[id] = ret ? 0 : 4;
94002 return ret ? -EINTR : 0;
94003
94004 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
94005 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
94006 return ret;
94007
94008 - td->event = atomic_add_return(1, &rttest_event);
94009 + td->event = atomic_add_return_unchecked(1, &rttest_event);
94010 rt_mutex_unlock(&mutexes[id]);
94011 - td->event = atomic_add_return(1, &rttest_event);
94012 + td->event = atomic_add_return_unchecked(1, &rttest_event);
94013 td->mutexes[id] = 0;
94014 return 0;
94015
94016 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
94017 break;
94018
94019 td->mutexes[dat] = 2;
94020 - td->event = atomic_add_return(1, &rttest_event);
94021 + td->event = atomic_add_return_unchecked(1, &rttest_event);
94022 break;
94023
94024 case RTTEST_LOCKBKL:
94025 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
94026 return;
94027
94028 td->mutexes[dat] = 3;
94029 - td->event = atomic_add_return(1, &rttest_event);
94030 + td->event = atomic_add_return_unchecked(1, &rttest_event);
94031 break;
94032
94033 case RTTEST_LOCKNOWAIT:
94034 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
94035 return;
94036
94037 td->mutexes[dat] = 1;
94038 - td->event = atomic_add_return(1, &rttest_event);
94039 + td->event = atomic_add_return_unchecked(1, &rttest_event);
94040 return;
94041
94042 case RTTEST_LOCKBKL:
94043 diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
94044 index 29bd4ba..8c5de90 100644
94045 --- a/kernel/rtmutex.c
94046 +++ b/kernel/rtmutex.c
94047 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
94048 */
94049 spin_lock_irqsave(&pendowner->pi_lock, flags);
94050
94051 - WARN_ON(!pendowner->pi_blocked_on);
94052 + BUG_ON(!pendowner->pi_blocked_on);
94053 WARN_ON(pendowner->pi_blocked_on != waiter);
94054 WARN_ON(pendowner->pi_blocked_on->lock != lock);
94055
94056 diff --git a/kernel/sched.c b/kernel/sched.c
94057 index 0591df8..e3af3a4 100644
94058 --- a/kernel/sched.c
94059 +++ b/kernel/sched.c
94060 @@ -5043,7 +5043,7 @@ out:
94061 * In CONFIG_NO_HZ case, the idle load balance owner will do the
94062 * rebalancing for all the cpus for whom scheduler ticks are stopped.
94063 */
94064 -static void run_rebalance_domains(struct softirq_action *h)
94065 +static void run_rebalance_domains(void)
94066 {
94067 int this_cpu = smp_processor_id();
94068 struct rq *this_rq = cpu_rq(this_cpu);
94069 @@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
94070 }
94071 }
94072
94073 +#ifdef CONFIG_GRKERNSEC_SETXID
94074 +extern void gr_delayed_cred_worker(void);
94075 +static inline void gr_cred_schedule(void)
94076 +{
94077 + if (unlikely(current->delayed_cred))
94078 + gr_delayed_cred_worker();
94079 +}
94080 +#else
94081 +static inline void gr_cred_schedule(void)
94082 +{
94083 +}
94084 +#endif
94085 +
94086 /*
94087 * schedule() is the main scheduler function.
94088 */
94089 @@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
94090 struct rq *rq;
94091 int cpu;
94092
94093 + pax_track_stack();
94094 +
94095 need_resched:
94096 preempt_disable();
94097 cpu = smp_processor_id();
94098 @@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
94099
94100 schedule_debug(prev);
94101
94102 + gr_cred_schedule();
94103 +
94104 if (sched_feat(HRTICK))
94105 hrtick_clear(rq);
94106
94107 @@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
94108 * Look out! "owner" is an entirely speculative pointer
94109 * access and not reliable.
94110 */
94111 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
94112 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
94113 {
94114 unsigned int cpu;
94115 struct rq *rq;
94116 @@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
94117 * DEBUG_PAGEALLOC could have unmapped it if
94118 * the mutex owner just released it and exited.
94119 */
94120 - if (probe_kernel_address(&owner->cpu, cpu))
94121 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
94122 return 0;
94123 #else
94124 - cpu = owner->cpu;
94125 + cpu = task_thread_info(owner)->cpu;
94126 #endif
94127
94128 /*
94129 @@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
94130 /*
94131 * Is that owner really running on that cpu?
94132 */
94133 - if (task_thread_info(rq->curr) != owner || need_resched())
94134 + if (rq->curr != owner || need_resched())
94135 return 0;
94136
94137 cpu_relax();
94138 @@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
94139 /* convert nice value [19,-20] to rlimit style value [1,40] */
94140 int nice_rlim = 20 - nice;
94141
94142 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
94143 +
94144 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
94145 capable(CAP_SYS_NICE));
94146 }
94147 @@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
94148 if (nice > 19)
94149 nice = 19;
94150
94151 - if (increment < 0 && !can_nice(current, nice))
94152 + if (increment < 0 && (!can_nice(current, nice) ||
94153 + gr_handle_chroot_nice()))
94154 return -EPERM;
94155
94156 retval = security_task_setnice(current, nice);
94157 @@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
94158 long power;
94159 int weight;
94160
94161 - WARN_ON(!sd || !sd->groups);
94162 + BUG_ON(!sd || !sd->groups);
94163
94164 if (cpu != group_first_cpu(sd->groups))
94165 return;
94166 diff --git a/kernel/signal.c b/kernel/signal.c
94167 index 2494827..cda80a0 100644
94168 --- a/kernel/signal.c
94169 +++ b/kernel/signal.c
94170 @@ -41,12 +41,12 @@
94171
94172 static struct kmem_cache *sigqueue_cachep;
94173
94174 -static void __user *sig_handler(struct task_struct *t, int sig)
94175 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
94176 {
94177 return t->sighand->action[sig - 1].sa.sa_handler;
94178 }
94179
94180 -static int sig_handler_ignored(void __user *handler, int sig)
94181 +static int sig_handler_ignored(__sighandler_t handler, int sig)
94182 {
94183 /* Is it explicitly or implicitly ignored? */
94184 return handler == SIG_IGN ||
94185 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
94186 static int sig_task_ignored(struct task_struct *t, int sig,
94187 int from_ancestor_ns)
94188 {
94189 - void __user *handler;
94190 + __sighandler_t handler;
94191
94192 handler = sig_handler(t, sig);
94193
94194 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
94195 */
94196 user = get_uid(__task_cred(t)->user);
94197 atomic_inc(&user->sigpending);
94198 +
94199 + if (!override_rlimit)
94200 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
94201 if (override_rlimit ||
94202 atomic_read(&user->sigpending) <=
94203 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
94204 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
94205
94206 int unhandled_signal(struct task_struct *tsk, int sig)
94207 {
94208 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
94209 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
94210 if (is_global_init(tsk))
94211 return 1;
94212 if (handler != SIG_IGN && handler != SIG_DFL)
94213 @@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
94214 }
94215 }
94216
94217 + /* allow glibc communication via tgkill to other threads in our
94218 + thread group */
94219 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
94220 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
94221 + && gr_handle_signal(t, sig))
94222 + return -EPERM;
94223 +
94224 return security_task_kill(t, info, sig, 0);
94225 }
94226
94227 @@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
94228 return send_signal(sig, info, p, 1);
94229 }
94230
94231 -static int
94232 +int
94233 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94234 {
94235 return send_signal(sig, info, t, 0);
94236 @@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94237 unsigned long int flags;
94238 int ret, blocked, ignored;
94239 struct k_sigaction *action;
94240 + int is_unhandled = 0;
94241
94242 spin_lock_irqsave(&t->sighand->siglock, flags);
94243 action = &t->sighand->action[sig-1];
94244 @@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94245 }
94246 if (action->sa.sa_handler == SIG_DFL)
94247 t->signal->flags &= ~SIGNAL_UNKILLABLE;
94248 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
94249 + is_unhandled = 1;
94250 ret = specific_send_sig_info(sig, info, t);
94251 spin_unlock_irqrestore(&t->sighand->siglock, flags);
94252
94253 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
94254 + normal operation */
94255 + if (is_unhandled) {
94256 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
94257 + gr_handle_crash(t, sig);
94258 + }
94259 +
94260 return ret;
94261 }
94262
94263 @@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
94264 {
94265 int ret = check_kill_permission(sig, info, p);
94266
94267 - if (!ret && sig)
94268 + if (!ret && sig) {
94269 ret = do_send_sig_info(sig, info, p, true);
94270 + if (!ret)
94271 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
94272 + }
94273
94274 return ret;
94275 }
94276 @@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
94277 {
94278 siginfo_t info;
94279
94280 + pax_track_stack();
94281 +
94282 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
94283
94284 memset(&info, 0, sizeof info);
94285 @@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
94286 int error = -ESRCH;
94287
94288 rcu_read_lock();
94289 - p = find_task_by_vpid(pid);
94290 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
94291 + /* allow glibc communication via tgkill to other threads in our
94292 + thread group */
94293 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
94294 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
94295 + p = find_task_by_vpid_unrestricted(pid);
94296 + else
94297 +#endif
94298 + p = find_task_by_vpid(pid);
94299 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
94300 error = check_kill_permission(sig, info, p);
94301 /*
94302 diff --git a/kernel/smp.c b/kernel/smp.c
94303 index aa9cff3..631a0de 100644
94304 --- a/kernel/smp.c
94305 +++ b/kernel/smp.c
94306 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
94307 }
94308 EXPORT_SYMBOL(smp_call_function);
94309
94310 -void ipi_call_lock(void)
94311 +void ipi_call_lock(void) __acquires(call_function.lock)
94312 {
94313 spin_lock(&call_function.lock);
94314 }
94315
94316 -void ipi_call_unlock(void)
94317 +void ipi_call_unlock(void) __releases(call_function.lock)
94318 {
94319 spin_unlock(&call_function.lock);
94320 }
94321
94322 -void ipi_call_lock_irq(void)
94323 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
94324 {
94325 spin_lock_irq(&call_function.lock);
94326 }
94327
94328 -void ipi_call_unlock_irq(void)
94329 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
94330 {
94331 spin_unlock_irq(&call_function.lock);
94332 }
94333 diff --git a/kernel/softirq.c b/kernel/softirq.c
94334 index 04a0252..580c512 100644
94335 --- a/kernel/softirq.c
94336 +++ b/kernel/softirq.c
94337 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
94338
94339 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
94340
94341 -char *softirq_to_name[NR_SOFTIRQS] = {
94342 +const char * const softirq_to_name[NR_SOFTIRQS] = {
94343 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
94344 "TASKLET", "SCHED", "HRTIMER", "RCU"
94345 };
94346 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
94347
94348 asmlinkage void __do_softirq(void)
94349 {
94350 - struct softirq_action *h;
94351 + const struct softirq_action *h;
94352 __u32 pending;
94353 int max_restart = MAX_SOFTIRQ_RESTART;
94354 int cpu;
94355 @@ -233,7 +233,7 @@ restart:
94356 kstat_incr_softirqs_this_cpu(h - softirq_vec);
94357
94358 trace_softirq_entry(h, softirq_vec);
94359 - h->action(h);
94360 + h->action();
94361 trace_softirq_exit(h, softirq_vec);
94362 if (unlikely(prev_count != preempt_count())) {
94363 printk(KERN_ERR "huh, entered softirq %td %s %p"
94364 @@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
94365 local_irq_restore(flags);
94366 }
94367
94368 -void open_softirq(int nr, void (*action)(struct softirq_action *))
94369 +void open_softirq(int nr, void (*action)(void))
94370 {
94371 - softirq_vec[nr].action = action;
94372 + pax_open_kernel();
94373 + *(void **)&softirq_vec[nr].action = action;
94374 + pax_close_kernel();
94375 }
94376
94377 /*
94378 @@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
94379
94380 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
94381
94382 -static void tasklet_action(struct softirq_action *a)
94383 +static void tasklet_action(void)
94384 {
94385 struct tasklet_struct *list;
94386
94387 @@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
94388 }
94389 }
94390
94391 -static void tasklet_hi_action(struct softirq_action *a)
94392 +static void tasklet_hi_action(void)
94393 {
94394 struct tasklet_struct *list;
94395
94396 diff --git a/kernel/sys.c b/kernel/sys.c
94397 index e9512b1..f07185f 100644
94398 --- a/kernel/sys.c
94399 +++ b/kernel/sys.c
94400 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
94401 error = -EACCES;
94402 goto out;
94403 }
94404 +
94405 + if (gr_handle_chroot_setpriority(p, niceval)) {
94406 + error = -EACCES;
94407 + goto out;
94408 + }
94409 +
94410 no_nice = security_task_setnice(p, niceval);
94411 if (no_nice) {
94412 error = no_nice;
94413 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
94414 !(user = find_user(who)))
94415 goto out_unlock; /* No processes for this user */
94416
94417 - do_each_thread(g, p)
94418 + do_each_thread(g, p) {
94419 if (__task_cred(p)->uid == who)
94420 error = set_one_prio(p, niceval, error);
94421 - while_each_thread(g, p);
94422 + } while_each_thread(g, p);
94423 if (who != cred->uid)
94424 free_uid(user); /* For find_user() */
94425 break;
94426 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
94427 !(user = find_user(who)))
94428 goto out_unlock; /* No processes for this user */
94429
94430 - do_each_thread(g, p)
94431 + do_each_thread(g, p) {
94432 if (__task_cred(p)->uid == who) {
94433 niceval = 20 - task_nice(p);
94434 if (niceval > retval)
94435 retval = niceval;
94436 }
94437 - while_each_thread(g, p);
94438 + } while_each_thread(g, p);
94439 if (who != cred->uid)
94440 free_uid(user); /* for find_user() */
94441 break;
94442 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
94443 goto error;
94444 }
94445
94446 + if (gr_check_group_change(new->gid, new->egid, -1))
94447 + goto error;
94448 +
94449 if (rgid != (gid_t) -1 ||
94450 (egid != (gid_t) -1 && egid != old->gid))
94451 new->sgid = new->egid;
94452 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
94453 goto error;
94454
94455 retval = -EPERM;
94456 +
94457 + if (gr_check_group_change(gid, gid, gid))
94458 + goto error;
94459 +
94460 if (capable(CAP_SETGID))
94461 new->gid = new->egid = new->sgid = new->fsgid = gid;
94462 else if (gid == old->gid || gid == old->sgid)
94463 @@ -559,7 +572,7 @@ error:
94464 /*
94465 * change the user struct in a credentials set to match the new UID
94466 */
94467 -static int set_user(struct cred *new)
94468 +int set_user(struct cred *new)
94469 {
94470 struct user_struct *new_user;
94471
94472 @@ -567,12 +580,19 @@ static int set_user(struct cred *new)
94473 if (!new_user)
94474 return -EAGAIN;
94475
94476 + /*
94477 + * We don't fail in case of NPROC limit excess here because too many
94478 + * poorly written programs don't check set*uid() return code, assuming
94479 + * it never fails if called by root. We may still enforce NPROC limit
94480 + * for programs doing set*uid()+execve() by harmlessly deferring the
94481 + * failure to the execve() stage.
94482 + */
94483 if (atomic_read(&new_user->processes) >=
94484 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
94485 - new_user != INIT_USER) {
94486 - free_uid(new_user);
94487 - return -EAGAIN;
94488 - }
94489 + new_user != INIT_USER)
94490 + current->flags |= PF_NPROC_EXCEEDED;
94491 + else
94492 + current->flags &= ~PF_NPROC_EXCEEDED;
94493
94494 free_uid(new->user);
94495 new->user = new_user;
94496 @@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
94497 goto error;
94498 }
94499
94500 + if (gr_check_user_change(new->uid, new->euid, -1))
94501 + goto error;
94502 +
94503 if (new->uid != old->uid) {
94504 retval = set_user(new);
94505 if (retval < 0)
94506 @@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
94507 goto error;
94508
94509 retval = -EPERM;
94510 +
94511 + if (gr_check_crash_uid(uid))
94512 + goto error;
94513 + if (gr_check_user_change(uid, uid, uid))
94514 + goto error;
94515 +
94516 if (capable(CAP_SETUID)) {
94517 new->suid = new->uid = uid;
94518 if (uid != old->uid) {
94519 @@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
94520 goto error;
94521 }
94522
94523 + if (gr_check_user_change(ruid, euid, -1))
94524 + goto error;
94525 +
94526 if (ruid != (uid_t) -1) {
94527 new->uid = ruid;
94528 if (ruid != old->uid) {
94529 @@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
94530 goto error;
94531 }
94532
94533 + if (gr_check_group_change(rgid, egid, -1))
94534 + goto error;
94535 +
94536 if (rgid != (gid_t) -1)
94537 new->gid = rgid;
94538 if (egid != (gid_t) -1)
94539 @@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
94540 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
94541 goto error;
94542
94543 + if (gr_check_user_change(-1, -1, uid))
94544 + goto error;
94545 +
94546 if (uid == old->uid || uid == old->euid ||
94547 uid == old->suid || uid == old->fsuid ||
94548 capable(CAP_SETUID)) {
94549 @@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
94550 if (gid == old->gid || gid == old->egid ||
94551 gid == old->sgid || gid == old->fsgid ||
94552 capable(CAP_SETGID)) {
94553 + if (gr_check_group_change(-1, -1, gid))
94554 + goto error;
94555 +
94556 if (gid != old_fsgid) {
94557 new->fsgid = gid;
94558 goto change_okay;
94559 @@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
94560 error = get_dumpable(me->mm);
94561 break;
94562 case PR_SET_DUMPABLE:
94563 - if (arg2 < 0 || arg2 > 1) {
94564 + if (arg2 > 1) {
94565 error = -EINVAL;
94566 break;
94567 }
94568 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
94569 index b8bd058..ab6a76be 100644
94570 --- a/kernel/sysctl.c
94571 +++ b/kernel/sysctl.c
94572 @@ -63,6 +63,13 @@
94573 static int deprecated_sysctl_warning(struct __sysctl_args *args);
94574
94575 #if defined(CONFIG_SYSCTL)
94576 +#include <linux/grsecurity.h>
94577 +#include <linux/grinternal.h>
94578 +
94579 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
94580 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
94581 + const int op);
94582 +extern int gr_handle_chroot_sysctl(const int op);
94583
94584 /* External variables not in a header file. */
94585 extern int C_A_D;
94586 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
94587 static int proc_taint(struct ctl_table *table, int write,
94588 void __user *buffer, size_t *lenp, loff_t *ppos);
94589 #endif
94590 +extern ctl_table grsecurity_table[];
94591
94592 static struct ctl_table root_table[];
94593 static struct ctl_table_root sysctl_table_root;
94594 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
94595 int sysctl_legacy_va_layout;
94596 #endif
94597
94598 +#ifdef CONFIG_PAX_SOFTMODE
94599 +static ctl_table pax_table[] = {
94600 + {
94601 + .ctl_name = CTL_UNNUMBERED,
94602 + .procname = "softmode",
94603 + .data = &pax_softmode,
94604 + .maxlen = sizeof(unsigned int),
94605 + .mode = 0600,
94606 + .proc_handler = &proc_dointvec,
94607 + },
94608 +
94609 + { .ctl_name = 0 }
94610 +};
94611 +#endif
94612 +
94613 extern int prove_locking;
94614 extern int lock_stat;
94615
94616 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
94617 #endif
94618
94619 static struct ctl_table kern_table[] = {
94620 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
94621 + {
94622 + .ctl_name = CTL_UNNUMBERED,
94623 + .procname = "grsecurity",
94624 + .mode = 0500,
94625 + .child = grsecurity_table,
94626 + },
94627 +#endif
94628 +
94629 +#ifdef CONFIG_PAX_SOFTMODE
94630 + {
94631 + .ctl_name = CTL_UNNUMBERED,
94632 + .procname = "pax",
94633 + .mode = 0500,
94634 + .child = pax_table,
94635 + },
94636 +#endif
94637 +
94638 {
94639 .ctl_name = CTL_UNNUMBERED,
94640 .procname = "sched_child_runs_first",
94641 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
94642 .data = &modprobe_path,
94643 .maxlen = KMOD_PATH_LEN,
94644 .mode = 0644,
94645 - .proc_handler = &proc_dostring,
94646 - .strategy = &sysctl_string,
94647 + .proc_handler = &proc_dostring_modpriv,
94648 + .strategy = &sysctl_string_modpriv,
94649 },
94650 {
94651 .ctl_name = CTL_UNNUMBERED,
94652 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
94653 .mode = 0644,
94654 .proc_handler = &proc_dointvec
94655 },
94656 + {
94657 + .procname = "heap_stack_gap",
94658 + .data = &sysctl_heap_stack_gap,
94659 + .maxlen = sizeof(sysctl_heap_stack_gap),
94660 + .mode = 0644,
94661 + .proc_handler = proc_doulongvec_minmax,
94662 + },
94663 #else
94664 {
94665 .ctl_name = CTL_UNNUMBERED,
94666 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
94667 return 0;
94668 }
94669
94670 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
94671 +
94672 static int parse_table(int __user *name, int nlen,
94673 void __user *oldval, size_t __user *oldlenp,
94674 void __user *newval, size_t newlen,
94675 @@ -1821,7 +1871,7 @@ repeat:
94676 if (n == table->ctl_name) {
94677 int error;
94678 if (table->child) {
94679 - if (sysctl_perm(root, table, MAY_EXEC))
94680 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
94681 return -EPERM;
94682 name++;
94683 nlen--;
94684 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
94685 int error;
94686 int mode;
94687
94688 + if (table->parent != NULL && table->parent->procname != NULL &&
94689 + table->procname != NULL &&
94690 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
94691 + return -EACCES;
94692 + if (gr_handle_chroot_sysctl(op))
94693 + return -EACCES;
94694 + error = gr_handle_sysctl(table, op);
94695 + if (error)
94696 + return error;
94697 +
94698 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
94699 + if (error)
94700 + return error;
94701 +
94702 + if (root->permissions)
94703 + mode = root->permissions(root, current->nsproxy, table);
94704 + else
94705 + mode = table->mode;
94706 +
94707 + return test_perm(mode, op);
94708 +}
94709 +
94710 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
94711 +{
94712 + int error;
94713 + int mode;
94714 +
94715 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
94716 if (error)
94717 return error;
94718 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
94719 buffer, lenp, ppos);
94720 }
94721
94722 +int proc_dostring_modpriv(struct ctl_table *table, int write,
94723 + void __user *buffer, size_t *lenp, loff_t *ppos)
94724 +{
94725 + if (write && !capable(CAP_SYS_MODULE))
94726 + return -EPERM;
94727 +
94728 + return _proc_do_string(table->data, table->maxlen, write,
94729 + buffer, lenp, ppos);
94730 +}
94731 +
94732
94733 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
94734 int *valp,
94735 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
94736 vleft = table->maxlen / sizeof(unsigned long);
94737 left = *lenp;
94738
94739 - for (; left && vleft--; i++, min++, max++, first=0) {
94740 + for (; left && vleft--; i++, first=0) {
94741 if (write) {
94742 while (left) {
94743 char c;
94744 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
94745 return -ENOSYS;
94746 }
94747
94748 +int proc_dostring_modpriv(struct ctl_table *table, int write,
94749 + void __user *buffer, size_t *lenp, loff_t *ppos)
94750 +{
94751 + return -ENOSYS;
94752 +}
94753 +
94754 int proc_dointvec(struct ctl_table *table, int write,
94755 void __user *buffer, size_t *lenp, loff_t *ppos)
94756 {
94757 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
94758 return 1;
94759 }
94760
94761 +int sysctl_string_modpriv(struct ctl_table *table,
94762 + void __user *oldval, size_t __user *oldlenp,
94763 + void __user *newval, size_t newlen)
94764 +{
94765 + if (newval && newlen && !capable(CAP_SYS_MODULE))
94766 + return -EPERM;
94767 +
94768 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
94769 +}
94770 +
94771 /*
94772 * This function makes sure that all of the integers in the vector
94773 * are between the minimum and maximum values given in the arrays
94774 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
94775 return -ENOSYS;
94776 }
94777
94778 +int sysctl_string_modpriv(struct ctl_table *table,
94779 + void __user *oldval, size_t __user *oldlenp,
94780 + void __user *newval, size_t newlen)
94781 +{
94782 + return -ENOSYS;
94783 +}
94784 +
94785 int sysctl_intvec(struct ctl_table *table,
94786 void __user *oldval, size_t __user *oldlenp,
94787 void __user *newval, size_t newlen)
94788 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
94789 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
94790 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
94791 EXPORT_SYMBOL(proc_dostring);
94792 +EXPORT_SYMBOL(proc_dostring_modpriv);
94793 EXPORT_SYMBOL(proc_doulongvec_minmax);
94794 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
94795 EXPORT_SYMBOL(register_sysctl_table);
94796 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
94797 EXPORT_SYMBOL(sysctl_jiffies);
94798 EXPORT_SYMBOL(sysctl_ms_jiffies);
94799 EXPORT_SYMBOL(sysctl_string);
94800 +EXPORT_SYMBOL(sysctl_string_modpriv);
94801 EXPORT_SYMBOL(sysctl_data);
94802 EXPORT_SYMBOL(unregister_sysctl_table);
94803 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
94804 index 469193c..ea3ecb2 100644
94805 --- a/kernel/sysctl_check.c
94806 +++ b/kernel/sysctl_check.c
94807 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
94808 } else {
94809 if ((table->strategy == sysctl_data) ||
94810 (table->strategy == sysctl_string) ||
94811 + (table->strategy == sysctl_string_modpriv) ||
94812 (table->strategy == sysctl_intvec) ||
94813 (table->strategy == sysctl_jiffies) ||
94814 (table->strategy == sysctl_ms_jiffies) ||
94815 (table->proc_handler == proc_dostring) ||
94816 + (table->proc_handler == proc_dostring_modpriv) ||
94817 (table->proc_handler == proc_dointvec) ||
94818 (table->proc_handler == proc_dointvec_minmax) ||
94819 (table->proc_handler == proc_dointvec_jiffies) ||
94820 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
94821 index a4ef542..798bcd7 100644
94822 --- a/kernel/taskstats.c
94823 +++ b/kernel/taskstats.c
94824 @@ -26,9 +26,12 @@
94825 #include <linux/cgroup.h>
94826 #include <linux/fs.h>
94827 #include <linux/file.h>
94828 +#include <linux/grsecurity.h>
94829 #include <net/genetlink.h>
94830 #include <asm/atomic.h>
94831
94832 +extern int gr_is_taskstats_denied(int pid);
94833 +
94834 /*
94835 * Maximum length of a cpumask that can be specified in
94836 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
94837 @@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
94838 size_t size;
94839 cpumask_var_t mask;
94840
94841 + if (gr_is_taskstats_denied(current->pid))
94842 + return -EACCES;
94843 +
94844 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
94845 return -ENOMEM;
94846
94847 diff --git a/kernel/time.c b/kernel/time.c
94848 index 33df60e..ca768bd 100644
94849 --- a/kernel/time.c
94850 +++ b/kernel/time.c
94851 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
94852 return error;
94853
94854 if (tz) {
94855 + /* we log in do_settimeofday called below, so don't log twice
94856 + */
94857 + if (!tv)
94858 + gr_log_timechange();
94859 +
94860 /* SMP safe, global irq locking makes it work. */
94861 sys_tz = *tz;
94862 update_vsyscall_tz();
94863 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
94864 * Avoid unnecessary multiplications/divisions in the
94865 * two most common HZ cases:
94866 */
94867 -unsigned int inline jiffies_to_msecs(const unsigned long j)
94868 +inline unsigned int jiffies_to_msecs(const unsigned long j)
94869 {
94870 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
94871 return (MSEC_PER_SEC / HZ) * j;
94872 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
94873 }
94874 EXPORT_SYMBOL(jiffies_to_msecs);
94875
94876 -unsigned int inline jiffies_to_usecs(const unsigned long j)
94877 +inline unsigned int jiffies_to_usecs(const unsigned long j)
94878 {
94879 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
94880 return (USEC_PER_SEC / HZ) * j;
94881 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
94882 index 57b953f..06f149f 100644
94883 --- a/kernel/time/tick-broadcast.c
94884 +++ b/kernel/time/tick-broadcast.c
94885 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
94886 * then clear the broadcast bit.
94887 */
94888 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
94889 - int cpu = smp_processor_id();
94890 + cpu = smp_processor_id();
94891
94892 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
94893 tick_broadcast_clear_oneshot(cpu);
94894 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
94895 index 4a71cff..ffb5548 100644
94896 --- a/kernel/time/timekeeping.c
94897 +++ b/kernel/time/timekeeping.c
94898 @@ -14,6 +14,7 @@
94899 #include <linux/init.h>
94900 #include <linux/mm.h>
94901 #include <linux/sched.h>
94902 +#include <linux/grsecurity.h>
94903 #include <linux/sysdev.h>
94904 #include <linux/clocksource.h>
94905 #include <linux/jiffies.h>
94906 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
94907 */
94908 struct timespec ts = xtime;
94909 timespec_add_ns(&ts, nsec);
94910 - ACCESS_ONCE(xtime_cache) = ts;
94911 + ACCESS_ONCE_RW(xtime_cache) = ts;
94912 }
94913
94914 /* must hold xtime_lock */
94915 @@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
94916 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
94917 return -EINVAL;
94918
94919 + gr_log_timechange();
94920 +
94921 write_seqlock_irqsave(&xtime_lock, flags);
94922
94923 timekeeping_forward_now();
94924 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
94925 index 54c0dda..e9095d9 100644
94926 --- a/kernel/time/timer_list.c
94927 +++ b/kernel/time/timer_list.c
94928 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
94929
94930 static void print_name_offset(struct seq_file *m, void *sym)
94931 {
94932 +#ifdef CONFIG_GRKERNSEC_HIDESYM
94933 + SEQ_printf(m, "<%p>", NULL);
94934 +#else
94935 char symname[KSYM_NAME_LEN];
94936
94937 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
94938 SEQ_printf(m, "<%p>", sym);
94939 else
94940 SEQ_printf(m, "%s", symname);
94941 +#endif
94942 }
94943
94944 static void
94945 @@ -112,7 +116,11 @@ next_one:
94946 static void
94947 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
94948 {
94949 +#ifdef CONFIG_GRKERNSEC_HIDESYM
94950 + SEQ_printf(m, " .base: %p\n", NULL);
94951 +#else
94952 SEQ_printf(m, " .base: %p\n", base);
94953 +#endif
94954 SEQ_printf(m, " .index: %d\n",
94955 base->index);
94956 SEQ_printf(m, " .resolution: %Lu nsecs\n",
94957 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
94958 {
94959 struct proc_dir_entry *pe;
94960
94961 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
94962 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
94963 +#else
94964 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
94965 +#endif
94966 if (!pe)
94967 return -ENOMEM;
94968 return 0;
94969 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
94970 index ee5681f..634089b 100644
94971 --- a/kernel/time/timer_stats.c
94972 +++ b/kernel/time/timer_stats.c
94973 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
94974 static unsigned long nr_entries;
94975 static struct entry entries[MAX_ENTRIES];
94976
94977 -static atomic_t overflow_count;
94978 +static atomic_unchecked_t overflow_count;
94979
94980 /*
94981 * The entries are in a hash-table, for fast lookup:
94982 @@ -140,7 +140,7 @@ static void reset_entries(void)
94983 nr_entries = 0;
94984 memset(entries, 0, sizeof(entries));
94985 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
94986 - atomic_set(&overflow_count, 0);
94987 + atomic_set_unchecked(&overflow_count, 0);
94988 }
94989
94990 static struct entry *alloc_entry(void)
94991 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94992 if (likely(entry))
94993 entry->count++;
94994 else
94995 - atomic_inc(&overflow_count);
94996 + atomic_inc_unchecked(&overflow_count);
94997
94998 out_unlock:
94999 spin_unlock_irqrestore(lock, flags);
95000 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
95001
95002 static void print_name_offset(struct seq_file *m, unsigned long addr)
95003 {
95004 +#ifdef CONFIG_GRKERNSEC_HIDESYM
95005 + seq_printf(m, "<%p>", NULL);
95006 +#else
95007 char symname[KSYM_NAME_LEN];
95008
95009 if (lookup_symbol_name(addr, symname) < 0)
95010 seq_printf(m, "<%p>", (void *)addr);
95011 else
95012 seq_printf(m, "%s", symname);
95013 +#endif
95014 }
95015
95016 static int tstats_show(struct seq_file *m, void *v)
95017 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
95018
95019 seq_puts(m, "Timer Stats Version: v0.2\n");
95020 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
95021 - if (atomic_read(&overflow_count))
95022 + if (atomic_read_unchecked(&overflow_count))
95023 seq_printf(m, "Overflow: %d entries\n",
95024 - atomic_read(&overflow_count));
95025 + atomic_read_unchecked(&overflow_count));
95026
95027 for (i = 0; i < nr_entries; i++) {
95028 entry = entries + i;
95029 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
95030 {
95031 struct proc_dir_entry *pe;
95032
95033 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
95034 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
95035 +#else
95036 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
95037 +#endif
95038 if (!pe)
95039 return -ENOMEM;
95040 return 0;
95041 diff --git a/kernel/timer.c b/kernel/timer.c
95042 index cb3c1f1..8bf5526 100644
95043 --- a/kernel/timer.c
95044 +++ b/kernel/timer.c
95045 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
95046 /*
95047 * This function runs timers and the timer-tq in bottom half context.
95048 */
95049 -static void run_timer_softirq(struct softirq_action *h)
95050 +static void run_timer_softirq(void)
95051 {
95052 struct tvec_base *base = __get_cpu_var(tvec_bases);
95053
95054 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
95055 index d9d6206..f19467e 100644
95056 --- a/kernel/trace/blktrace.c
95057 +++ b/kernel/trace/blktrace.c
95058 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
95059 struct blk_trace *bt = filp->private_data;
95060 char buf[16];
95061
95062 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
95063 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
95064
95065 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
95066 }
95067 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
95068 return 1;
95069
95070 bt = buf->chan->private_data;
95071 - atomic_inc(&bt->dropped);
95072 + atomic_inc_unchecked(&bt->dropped);
95073 return 0;
95074 }
95075
95076 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
95077
95078 bt->dir = dir;
95079 bt->dev = dev;
95080 - atomic_set(&bt->dropped, 0);
95081 + atomic_set_unchecked(&bt->dropped, 0);
95082
95083 ret = -EIO;
95084 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
95085 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
95086 index 4872937..c794d40 100644
95087 --- a/kernel/trace/ftrace.c
95088 +++ b/kernel/trace/ftrace.c
95089 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
95090
95091 ip = rec->ip;
95092
95093 + ret = ftrace_arch_code_modify_prepare();
95094 + FTRACE_WARN_ON(ret);
95095 + if (ret)
95096 + return 0;
95097 +
95098 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
95099 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
95100 if (ret) {
95101 ftrace_bug(ret, ip);
95102 rec->flags |= FTRACE_FL_FAILED;
95103 - return 0;
95104 }
95105 - return 1;
95106 + return ret ? 0 : 1;
95107 }
95108
95109 /*
95110 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
95111 index e749a05..19c6e94 100644
95112 --- a/kernel/trace/ring_buffer.c
95113 +++ b/kernel/trace/ring_buffer.c
95114 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
95115 * the reader page). But if the next page is a header page,
95116 * its flags will be non zero.
95117 */
95118 -static int inline
95119 +static inline int
95120 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
95121 struct buffer_page *page, struct list_head *list)
95122 {
95123 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
95124 index a2a2d1f..7f32b09 100644
95125 --- a/kernel/trace/trace.c
95126 +++ b/kernel/trace/trace.c
95127 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
95128 size_t rem;
95129 unsigned int i;
95130
95131 + pax_track_stack();
95132 +
95133 /* copy the tracer to avoid using a global lock all around */
95134 mutex_lock(&trace_types_lock);
95135 if (unlikely(old_tracer != current_trace && current_trace)) {
95136 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
95137 int entries, size, i;
95138 size_t ret;
95139
95140 + pax_track_stack();
95141 +
95142 if (*ppos & (PAGE_SIZE - 1)) {
95143 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
95144 return -EINVAL;
95145 @@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
95146 };
95147 #endif
95148
95149 -static struct dentry *d_tracer;
95150 -
95151 struct dentry *tracing_init_dentry(void)
95152 {
95153 + static struct dentry *d_tracer;
95154 static int once;
95155
95156 if (d_tracer)
95157 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
95158 return d_tracer;
95159 }
95160
95161 -static struct dentry *d_percpu;
95162 -
95163 struct dentry *tracing_dentry_percpu(void)
95164 {
95165 + static struct dentry *d_percpu;
95166 static int once;
95167 struct dentry *d_tracer;
95168
95169 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
95170 index d128f65..f37b4af 100644
95171 --- a/kernel/trace/trace_events.c
95172 +++ b/kernel/trace/trace_events.c
95173 @@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
95174 * Modules must own their file_operations to keep up with
95175 * reference counting.
95176 */
95177 +
95178 struct ftrace_module_file_ops {
95179 struct list_head list;
95180 struct module *mod;
95181 - struct file_operations id;
95182 - struct file_operations enable;
95183 - struct file_operations format;
95184 - struct file_operations filter;
95185 };
95186
95187 static void remove_subsystem_dir(const char *name)
95188 @@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
95189
95190 file_ops->mod = mod;
95191
95192 - file_ops->id = ftrace_event_id_fops;
95193 - file_ops->id.owner = mod;
95194 -
95195 - file_ops->enable = ftrace_enable_fops;
95196 - file_ops->enable.owner = mod;
95197 -
95198 - file_ops->filter = ftrace_event_filter_fops;
95199 - file_ops->filter.owner = mod;
95200 -
95201 - file_ops->format = ftrace_event_format_fops;
95202 - file_ops->format.owner = mod;
95203 + pax_open_kernel();
95204 + *(void **)&mod->trace_id.owner = mod;
95205 + *(void **)&mod->trace_enable.owner = mod;
95206 + *(void **)&mod->trace_filter.owner = mod;
95207 + *(void **)&mod->trace_format.owner = mod;
95208 + pax_close_kernel();
95209
95210 list_add(&file_ops->list, &ftrace_module_file_list);
95211
95212 @@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
95213 call->mod = mod;
95214 list_add(&call->list, &ftrace_events);
95215 event_create_dir(call, d_events,
95216 - &file_ops->id, &file_ops->enable,
95217 - &file_ops->filter, &file_ops->format);
95218 + &mod->trace_id, &mod->trace_enable,
95219 + &mod->trace_filter, &mod->trace_format);
95220 }
95221 }
95222
95223 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
95224 index 0acd834..b800b56 100644
95225 --- a/kernel/trace/trace_mmiotrace.c
95226 +++ b/kernel/trace/trace_mmiotrace.c
95227 @@ -23,7 +23,7 @@ struct header_iter {
95228 static struct trace_array *mmio_trace_array;
95229 static bool overrun_detected;
95230 static unsigned long prev_overruns;
95231 -static atomic_t dropped_count;
95232 +static atomic_unchecked_t dropped_count;
95233
95234 static void mmio_reset_data(struct trace_array *tr)
95235 {
95236 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
95237
95238 static unsigned long count_overruns(struct trace_iterator *iter)
95239 {
95240 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
95241 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
95242 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
95243
95244 if (over > prev_overruns)
95245 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
95246 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
95247 sizeof(*entry), 0, pc);
95248 if (!event) {
95249 - atomic_inc(&dropped_count);
95250 + atomic_inc_unchecked(&dropped_count);
95251 return;
95252 }
95253 entry = ring_buffer_event_data(event);
95254 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
95255 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
95256 sizeof(*entry), 0, pc);
95257 if (!event) {
95258 - atomic_inc(&dropped_count);
95259 + atomic_inc_unchecked(&dropped_count);
95260 return;
95261 }
95262 entry = ring_buffer_event_data(event);
95263 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
95264 index b6c12c6..41fdc53 100644
95265 --- a/kernel/trace/trace_output.c
95266 +++ b/kernel/trace/trace_output.c
95267 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
95268 return 0;
95269 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
95270 if (!IS_ERR(p)) {
95271 - p = mangle_path(s->buffer + s->len, p, "\n");
95272 + p = mangle_path(s->buffer + s->len, p, "\n\\");
95273 if (p) {
95274 s->len = p - s->buffer;
95275 return 1;
95276 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
95277 index 8504ac7..ecf0adb 100644
95278 --- a/kernel/trace/trace_stack.c
95279 +++ b/kernel/trace/trace_stack.c
95280 @@ -50,7 +50,7 @@ static inline void check_stack(void)
95281 return;
95282
95283 /* we do not handle interrupt stacks yet */
95284 - if (!object_is_on_stack(&this_size))
95285 + if (!object_starts_on_stack(&this_size))
95286 return;
95287
95288 local_irq_save(flags);
95289 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
95290 index 40cafb0..d5ead43 100644
95291 --- a/kernel/trace/trace_workqueue.c
95292 +++ b/kernel/trace/trace_workqueue.c
95293 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
95294 int cpu;
95295 pid_t pid;
95296 /* Can be inserted from interrupt or user context, need to be atomic */
95297 - atomic_t inserted;
95298 + atomic_unchecked_t inserted;
95299 /*
95300 * Don't need to be atomic, works are serialized in a single workqueue thread
95301 * on a single CPU.
95302 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
95303 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
95304 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
95305 if (node->pid == wq_thread->pid) {
95306 - atomic_inc(&node->inserted);
95307 + atomic_inc_unchecked(&node->inserted);
95308 goto found;
95309 }
95310 }
95311 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
95312 tsk = get_pid_task(pid, PIDTYPE_PID);
95313 if (tsk) {
95314 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
95315 - atomic_read(&cws->inserted), cws->executed,
95316 + atomic_read_unchecked(&cws->inserted), cws->executed,
95317 tsk->comm);
95318 put_task_struct(tsk);
95319 }
95320 diff --git a/kernel/user.c b/kernel/user.c
95321 index 1b91701..8795237 100644
95322 --- a/kernel/user.c
95323 +++ b/kernel/user.c
95324 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
95325 spin_lock_irq(&uidhash_lock);
95326 up = uid_hash_find(uid, hashent);
95327 if (up) {
95328 + put_user_ns(ns);
95329 key_put(new->uid_keyring);
95330 key_put(new->session_keyring);
95331 kmem_cache_free(uid_cachep, new);
95332 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
95333 index 234ceb1..ad74049 100644
95334 --- a/lib/Kconfig.debug
95335 +++ b/lib/Kconfig.debug
95336 @@ -905,7 +905,7 @@ config LATENCYTOP
95337 select STACKTRACE
95338 select SCHEDSTATS
95339 select SCHED_DEBUG
95340 - depends on HAVE_LATENCYTOP_SUPPORT
95341 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
95342 help
95343 Enable this option if you want to use the LatencyTOP tool
95344 to find out which userspace is blocking on what kernel operations.
95345 diff --git a/lib/bitmap.c b/lib/bitmap.c
95346 index 7025658..8d14cab 100644
95347 --- a/lib/bitmap.c
95348 +++ b/lib/bitmap.c
95349 @@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
95350 {
95351 int c, old_c, totaldigits, ndigits, nchunks, nbits;
95352 u32 chunk;
95353 - const char __user *ubuf = buf;
95354 + const char __user *ubuf = (const char __force_user *)buf;
95355
95356 bitmap_zero(maskp, nmaskbits);
95357
95358 @@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
95359 {
95360 if (!access_ok(VERIFY_READ, ubuf, ulen))
95361 return -EFAULT;
95362 - return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
95363 + return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
95364 }
95365 EXPORT_SYMBOL(bitmap_parse_user);
95366
95367 diff --git a/lib/bug.c b/lib/bug.c
95368 index 300e41a..2779eb0 100644
95369 --- a/lib/bug.c
95370 +++ b/lib/bug.c
95371 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
95372 return BUG_TRAP_TYPE_NONE;
95373
95374 bug = find_bug(bugaddr);
95375 + if (!bug)
95376 + return BUG_TRAP_TYPE_NONE;
95377
95378 printk(KERN_EMERG "------------[ cut here ]------------\n");
95379
95380 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
95381 index 2b413db..e21d207 100644
95382 --- a/lib/debugobjects.c
95383 +++ b/lib/debugobjects.c
95384 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
95385 if (limit > 4)
95386 return;
95387
95388 - is_on_stack = object_is_on_stack(addr);
95389 + is_on_stack = object_starts_on_stack(addr);
95390 if (is_on_stack == onstack)
95391 return;
95392
95393 diff --git a/lib/devres.c b/lib/devres.c
95394 index 72c8909..7543868 100644
95395 --- a/lib/devres.c
95396 +++ b/lib/devres.c
95397 @@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
95398 {
95399 iounmap(addr);
95400 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
95401 - (void *)addr));
95402 + (void __force *)addr));
95403 }
95404 EXPORT_SYMBOL(devm_iounmap);
95405
95406 @@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
95407 {
95408 ioport_unmap(addr);
95409 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
95410 - devm_ioport_map_match, (void *)addr));
95411 + devm_ioport_map_match, (void __force *)addr));
95412 }
95413 EXPORT_SYMBOL(devm_ioport_unmap);
95414
95415 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
95416 index 084e879..0674448 100644
95417 --- a/lib/dma-debug.c
95418 +++ b/lib/dma-debug.c
95419 @@ -861,7 +861,7 @@ out:
95420
95421 static void check_for_stack(struct device *dev, void *addr)
95422 {
95423 - if (object_is_on_stack(addr))
95424 + if (object_starts_on_stack(addr))
95425 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
95426 "stack [addr=%p]\n", addr);
95427 }
95428 diff --git a/lib/idr.c b/lib/idr.c
95429 index eda7ba3..915dfae 100644
95430 --- a/lib/idr.c
95431 +++ b/lib/idr.c
95432 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
95433 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
95434
95435 /* if already at the top layer, we need to grow */
95436 - if (id >= 1 << (idp->layers * IDR_BITS)) {
95437 + if (id >= (1 << (idp->layers * IDR_BITS))) {
95438 *starting_id = id;
95439 return IDR_NEED_TO_GROW;
95440 }
95441 diff --git a/lib/inflate.c b/lib/inflate.c
95442 index d102559..4215f31 100644
95443 --- a/lib/inflate.c
95444 +++ b/lib/inflate.c
95445 @@ -266,7 +266,7 @@ static void free(void *where)
95446 malloc_ptr = free_mem_ptr;
95447 }
95448 #else
95449 -#define malloc(a) kmalloc(a, GFP_KERNEL)
95450 +#define malloc(a) kmalloc((a), GFP_KERNEL)
95451 #define free(a) kfree(a)
95452 #endif
95453
95454 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
95455 index bd2bea9..6b3c95e 100644
95456 --- a/lib/is_single_threaded.c
95457 +++ b/lib/is_single_threaded.c
95458 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
95459 struct task_struct *p, *t;
95460 bool ret;
95461
95462 + if (!mm)
95463 + return true;
95464 +
95465 if (atomic_read(&task->signal->live) != 1)
95466 return false;
95467
95468 diff --git a/lib/kobject.c b/lib/kobject.c
95469 index b512b74..8115eb1 100644
95470 --- a/lib/kobject.c
95471 +++ b/lib/kobject.c
95472 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
95473 return ret;
95474 }
95475
95476 -struct sysfs_ops kobj_sysfs_ops = {
95477 +const struct sysfs_ops kobj_sysfs_ops = {
95478 .show = kobj_attr_show,
95479 .store = kobj_attr_store,
95480 };
95481 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
95482 * If the kset was not able to be created, NULL will be returned.
95483 */
95484 static struct kset *kset_create(const char *name,
95485 - struct kset_uevent_ops *uevent_ops,
95486 + const struct kset_uevent_ops *uevent_ops,
95487 struct kobject *parent_kobj)
95488 {
95489 struct kset *kset;
95490 @@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
95491 * If the kset was not able to be created, NULL will be returned.
95492 */
95493 struct kset *kset_create_and_add(const char *name,
95494 - struct kset_uevent_ops *uevent_ops,
95495 + const struct kset_uevent_ops *uevent_ops,
95496 struct kobject *parent_kobj)
95497 {
95498 struct kset *kset;
95499 diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
95500 index 507b821..0bf8ed0 100644
95501 --- a/lib/kobject_uevent.c
95502 +++ b/lib/kobject_uevent.c
95503 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
95504 const char *subsystem;
95505 struct kobject *top_kobj;
95506 struct kset *kset;
95507 - struct kset_uevent_ops *uevent_ops;
95508 + const struct kset_uevent_ops *uevent_ops;
95509 u64 seq;
95510 int i = 0;
95511 int retval = 0;
95512 diff --git a/lib/kref.c b/lib/kref.c
95513 index 9ecd6e8..12c94c1 100644
95514 --- a/lib/kref.c
95515 +++ b/lib/kref.c
95516 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
95517 */
95518 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
95519 {
95520 - WARN_ON(release == NULL);
95521 + BUG_ON(release == NULL);
95522 WARN_ON(release == (void (*)(struct kref *))kfree);
95523
95524 if (atomic_dec_and_test(&kref->refcount)) {
95525 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95526 index 92cdd99..a8149d7 100644
95527 --- a/lib/radix-tree.c
95528 +++ b/lib/radix-tree.c
95529 @@ -81,7 +81,7 @@ struct radix_tree_preload {
95530 int nr;
95531 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
95532 };
95533 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95534 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95535
95536 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
95537 {
95538 diff --git a/lib/random32.c b/lib/random32.c
95539 index 217d5c4..45aba8a 100644
95540 --- a/lib/random32.c
95541 +++ b/lib/random32.c
95542 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
95543 */
95544 static inline u32 __seed(u32 x, u32 m)
95545 {
95546 - return (x < m) ? x + m : x;
95547 + return (x <= m) ? x + m + 1 : x;
95548 }
95549
95550 /**
95551 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
95552 index 33bed5e..1477e46 100644
95553 --- a/lib/vsprintf.c
95554 +++ b/lib/vsprintf.c
95555 @@ -16,6 +16,9 @@
95556 * - scnprintf and vscnprintf
95557 */
95558
95559 +#ifdef CONFIG_GRKERNSEC_HIDESYM
95560 +#define __INCLUDED_BY_HIDESYM 1
95561 +#endif
95562 #include <stdarg.h>
95563 #include <linux/module.h>
95564 #include <linux/types.h>
95565 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
95566 return buf;
95567 }
95568
95569 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
95570 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
95571 {
95572 int len, i;
95573
95574 if ((unsigned long)s < PAGE_SIZE)
95575 - s = "<NULL>";
95576 + s = "(null)";
95577
95578 len = strnlen(s, spec.precision);
95579
95580 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
95581 unsigned long value = (unsigned long) ptr;
95582 #ifdef CONFIG_KALLSYMS
95583 char sym[KSYM_SYMBOL_LEN];
95584 - if (ext != 'f' && ext != 's')
95585 + if (ext != 'f' && ext != 's' && ext != 'a')
95586 sprint_symbol(sym, value);
95587 else
95588 kallsyms_lookup(value, NULL, NULL, NULL, sym);
95589 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
95590 * - 'f' For simple symbolic function names without offset
95591 * - 'S' For symbolic direct pointers with offset
95592 * - 's' For symbolic direct pointers without offset
95593 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
95594 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
95595 * - 'R' For a struct resource pointer, it prints the range of
95596 * addresses (not the name nor the flags)
95597 * - 'M' For a 6-byte MAC address, it prints the address in the
95598 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95599 struct printf_spec spec)
95600 {
95601 if (!ptr)
95602 - return string(buf, end, "(null)", spec);
95603 + return string(buf, end, "(nil)", spec);
95604
95605 switch (*fmt) {
95606 case 'F':
95607 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95608 case 's':
95609 /* Fallthrough */
95610 case 'S':
95611 +#ifdef CONFIG_GRKERNSEC_HIDESYM
95612 + break;
95613 +#else
95614 + return symbol_string(buf, end, ptr, spec, *fmt);
95615 +#endif
95616 + case 'a':
95617 + /* Fallthrough */
95618 + case 'A':
95619 return symbol_string(buf, end, ptr, spec, *fmt);
95620 case 'R':
95621 return resource_string(buf, end, ptr, spec);
95622 @@ -1445,7 +1458,7 @@ do { \
95623 size_t len;
95624 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
95625 || (unsigned long)save_str < PAGE_SIZE)
95626 - save_str = "<NULL>";
95627 + save_str = "(null)";
95628 len = strlen(save_str);
95629 if (str + len + 1 < end)
95630 memcpy(str, save_str, len + 1);
95631 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95632 typeof(type) value; \
95633 if (sizeof(type) == 8) { \
95634 args = PTR_ALIGN(args, sizeof(u32)); \
95635 - *(u32 *)&value = *(u32 *)args; \
95636 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
95637 + *(u32 *)&value = *(const u32 *)args; \
95638 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
95639 } else { \
95640 args = PTR_ALIGN(args, sizeof(type)); \
95641 - value = *(typeof(type) *)args; \
95642 + value = *(const typeof(type) *)args; \
95643 } \
95644 args += sizeof(type); \
95645 value; \
95646 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95647 const char *str_arg = args;
95648 size_t len = strlen(str_arg);
95649 args += len + 1;
95650 - str = string(str, end, (char *)str_arg, spec);
95651 + str = string(str, end, str_arg, spec);
95652 break;
95653 }
95654
95655 diff --git a/localversion-grsec b/localversion-grsec
95656 new file mode 100644
95657 index 0000000..7cd6065
95658 --- /dev/null
95659 +++ b/localversion-grsec
95660 @@ -0,0 +1 @@
95661 +-grsec
95662 diff --git a/mm/Kconfig b/mm/Kconfig
95663 index 2c19c0b..f3c3f83 100644
95664 --- a/mm/Kconfig
95665 +++ b/mm/Kconfig
95666 @@ -228,7 +228,7 @@ config KSM
95667 config DEFAULT_MMAP_MIN_ADDR
95668 int "Low address space to protect from user allocation"
95669 depends on MMU
95670 - default 4096
95671 + default 65536
95672 help
95673 This is the portion of low virtual memory which should be protected
95674 from userspace allocation. Keeping a user from writing to low pages
95675 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
95676 index d824401..9f5244a 100644
95677 --- a/mm/backing-dev.c
95678 +++ b/mm/backing-dev.c
95679 @@ -271,7 +271,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
95680 list_add_tail_rcu(&wb->list, &bdi->wb_list);
95681 spin_unlock(&bdi->wb_lock);
95682
95683 - tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
95684 + tsk->flags |= PF_SWAPWRITE;
95685 set_freezable();
95686
95687 /*
95688 @@ -489,7 +489,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
95689 * Add the default flusher task that gets created for any bdi
95690 * that has dirty data pending writeout
95691 */
95692 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
95693 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
95694 {
95695 if (!bdi_cap_writeback_dirty(bdi))
95696 return;
95697 diff --git a/mm/filemap.c b/mm/filemap.c
95698 index a1fe378..e26702f 100644
95699 --- a/mm/filemap.c
95700 +++ b/mm/filemap.c
95701 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
95702 struct address_space *mapping = file->f_mapping;
95703
95704 if (!mapping->a_ops->readpage)
95705 - return -ENOEXEC;
95706 + return -ENODEV;
95707 file_accessed(file);
95708 vma->vm_ops = &generic_file_vm_ops;
95709 vma->vm_flags |= VM_CAN_NONLINEAR;
95710 @@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
95711 *pos = i_size_read(inode);
95712
95713 if (limit != RLIM_INFINITY) {
95714 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
95715 if (*pos >= limit) {
95716 send_sig(SIGXFSZ, current, 0);
95717 return -EFBIG;
95718 diff --git a/mm/fremap.c b/mm/fremap.c
95719 index b6ec85a..a24ac22 100644
95720 --- a/mm/fremap.c
95721 +++ b/mm/fremap.c
95722 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95723 retry:
95724 vma = find_vma(mm, start);
95725
95726 +#ifdef CONFIG_PAX_SEGMEXEC
95727 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
95728 + goto out;
95729 +#endif
95730 +
95731 /*
95732 * Make sure the vma is shared, that it supports prefaulting,
95733 * and that the remapped range is valid and fully within
95734 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95735 /*
95736 * drop PG_Mlocked flag for over-mapped range
95737 */
95738 - unsigned int saved_flags = vma->vm_flags;
95739 + unsigned long saved_flags = vma->vm_flags;
95740 munlock_vma_pages_range(vma, start, start + size);
95741 vma->vm_flags = saved_flags;
95742 }
95743 diff --git a/mm/highmem.c b/mm/highmem.c
95744 index 9c1e627..5ca9447 100644
95745 --- a/mm/highmem.c
95746 +++ b/mm/highmem.c
95747 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
95748 * So no dangers, even with speculative execution.
95749 */
95750 page = pte_page(pkmap_page_table[i]);
95751 + pax_open_kernel();
95752 pte_clear(&init_mm, (unsigned long)page_address(page),
95753 &pkmap_page_table[i]);
95754 -
95755 + pax_close_kernel();
95756 set_page_address(page, NULL);
95757 need_flush = 1;
95758 }
95759 @@ -177,9 +178,11 @@ start:
95760 }
95761 }
95762 vaddr = PKMAP_ADDR(last_pkmap_nr);
95763 +
95764 + pax_open_kernel();
95765 set_pte_at(&init_mm, vaddr,
95766 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
95767 -
95768 + pax_close_kernel();
95769 pkmap_count[last_pkmap_nr] = 1;
95770 set_page_address(page, (void *)vaddr);
95771
95772 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
95773 index 5e1e508..ac70275 100644
95774 --- a/mm/hugetlb.c
95775 +++ b/mm/hugetlb.c
95776 @@ -869,6 +869,7 @@ free:
95777 list_del(&page->lru);
95778 enqueue_huge_page(h, page);
95779 }
95780 + spin_unlock(&hugetlb_lock);
95781
95782 /* Free unnecessary surplus pages to the buddy allocator */
95783 if (!list_empty(&surplus_list)) {
95784 @@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
95785 return 1;
95786 }
95787
95788 +#ifdef CONFIG_PAX_SEGMEXEC
95789 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
95790 +{
95791 + struct mm_struct *mm = vma->vm_mm;
95792 + struct vm_area_struct *vma_m;
95793 + unsigned long address_m;
95794 + pte_t *ptep_m;
95795 +
95796 + vma_m = pax_find_mirror_vma(vma);
95797 + if (!vma_m)
95798 + return;
95799 +
95800 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95801 + address_m = address + SEGMEXEC_TASK_SIZE;
95802 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
95803 + get_page(page_m);
95804 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
95805 +}
95806 +#endif
95807 +
95808 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
95809 unsigned long address, pte_t *ptep, pte_t pte,
95810 struct page *pagecache_page)
95811 @@ -2004,6 +2025,11 @@ retry_avoidcopy:
95812 huge_ptep_clear_flush(vma, address, ptep);
95813 set_huge_pte_at(mm, address, ptep,
95814 make_huge_pte(vma, new_page, 1));
95815 +
95816 +#ifdef CONFIG_PAX_SEGMEXEC
95817 + pax_mirror_huge_pte(vma, address, new_page);
95818 +#endif
95819 +
95820 /* Make the old page be freed below */
95821 new_page = old_page;
95822 }
95823 @@ -2135,6 +2161,10 @@ retry:
95824 && (vma->vm_flags & VM_SHARED)));
95825 set_huge_pte_at(mm, address, ptep, new_pte);
95826
95827 +#ifdef CONFIG_PAX_SEGMEXEC
95828 + pax_mirror_huge_pte(vma, address, page);
95829 +#endif
95830 +
95831 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
95832 /* Optimization, do the COW without a second fault */
95833 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
95834 @@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95835 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
95836 struct hstate *h = hstate_vma(vma);
95837
95838 +#ifdef CONFIG_PAX_SEGMEXEC
95839 + struct vm_area_struct *vma_m;
95840 +
95841 + vma_m = pax_find_mirror_vma(vma);
95842 + if (vma_m) {
95843 + unsigned long address_m;
95844 +
95845 + if (vma->vm_start > vma_m->vm_start) {
95846 + address_m = address;
95847 + address -= SEGMEXEC_TASK_SIZE;
95848 + vma = vma_m;
95849 + h = hstate_vma(vma);
95850 + } else
95851 + address_m = address + SEGMEXEC_TASK_SIZE;
95852 +
95853 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
95854 + return VM_FAULT_OOM;
95855 + address_m &= HPAGE_MASK;
95856 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
95857 + }
95858 +#endif
95859 +
95860 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
95861 if (!ptep)
95862 return VM_FAULT_OOM;
95863 diff --git a/mm/internal.h b/mm/internal.h
95864 index f03e8e2..7354343 100644
95865 --- a/mm/internal.h
95866 +++ b/mm/internal.h
95867 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
95868 * in mm/page_alloc.c
95869 */
95870 extern void __free_pages_bootmem(struct page *page, unsigned int order);
95871 +extern void free_compound_page(struct page *page);
95872 extern void prep_compound_page(struct page *page, unsigned long order);
95873
95874
95875 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
95876 index c346660..b47382f 100644
95877 --- a/mm/kmemleak.c
95878 +++ b/mm/kmemleak.c
95879 @@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
95880
95881 for (i = 0; i < object->trace_len; i++) {
95882 void *ptr = (void *)object->trace[i];
95883 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
95884 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
95885 }
95886 }
95887
95888 diff --git a/mm/maccess.c b/mm/maccess.c
95889 index 9073695..1127f348 100644
95890 --- a/mm/maccess.c
95891 +++ b/mm/maccess.c
95892 @@ -14,7 +14,7 @@
95893 * Safely read from address @src to the buffer at @dst. If a kernel fault
95894 * happens, handle that and return -EFAULT.
95895 */
95896 -long probe_kernel_read(void *dst, void *src, size_t size)
95897 +long probe_kernel_read(void *dst, const void *src, size_t size)
95898 {
95899 long ret;
95900 mm_segment_t old_fs = get_fs();
95901 @@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
95902 set_fs(KERNEL_DS);
95903 pagefault_disable();
95904 ret = __copy_from_user_inatomic(dst,
95905 - (__force const void __user *)src, size);
95906 + (const void __force_user *)src, size);
95907 pagefault_enable();
95908 set_fs(old_fs);
95909
95910 @@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
95911 * Safely write to address @dst from the buffer at @src. If a kernel fault
95912 * happens, handle that and return -EFAULT.
95913 */
95914 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
95915 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
95916 {
95917 long ret;
95918 mm_segment_t old_fs = get_fs();
95919
95920 set_fs(KERNEL_DS);
95921 pagefault_disable();
95922 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
95923 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
95924 pagefault_enable();
95925 set_fs(old_fs);
95926
95927 diff --git a/mm/madvise.c b/mm/madvise.c
95928 index 35b1479..499f7d4 100644
95929 --- a/mm/madvise.c
95930 +++ b/mm/madvise.c
95931 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
95932 pgoff_t pgoff;
95933 unsigned long new_flags = vma->vm_flags;
95934
95935 +#ifdef CONFIG_PAX_SEGMEXEC
95936 + struct vm_area_struct *vma_m;
95937 +#endif
95938 +
95939 switch (behavior) {
95940 case MADV_NORMAL:
95941 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
95942 @@ -103,6 +107,13 @@ success:
95943 /*
95944 * vm_flags is protected by the mmap_sem held in write mode.
95945 */
95946 +
95947 +#ifdef CONFIG_PAX_SEGMEXEC
95948 + vma_m = pax_find_mirror_vma(vma);
95949 + if (vma_m)
95950 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
95951 +#endif
95952 +
95953 vma->vm_flags = new_flags;
95954
95955 out:
95956 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
95957 struct vm_area_struct ** prev,
95958 unsigned long start, unsigned long end)
95959 {
95960 +
95961 +#ifdef CONFIG_PAX_SEGMEXEC
95962 + struct vm_area_struct *vma_m;
95963 +#endif
95964 +
95965 *prev = vma;
95966 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
95967 return -EINVAL;
95968 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
95969 zap_page_range(vma, start, end - start, &details);
95970 } else
95971 zap_page_range(vma, start, end - start, NULL);
95972 +
95973 +#ifdef CONFIG_PAX_SEGMEXEC
95974 + vma_m = pax_find_mirror_vma(vma);
95975 + if (vma_m) {
95976 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
95977 + struct zap_details details = {
95978 + .nonlinear_vma = vma_m,
95979 + .last_index = ULONG_MAX,
95980 + };
95981 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
95982 + } else
95983 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
95984 + }
95985 +#endif
95986 +
95987 return 0;
95988 }
95989
95990 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
95991 if (end < start)
95992 goto out;
95993
95994 +#ifdef CONFIG_PAX_SEGMEXEC
95995 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
95996 + if (end > SEGMEXEC_TASK_SIZE)
95997 + goto out;
95998 + } else
95999 +#endif
96000 +
96001 + if (end > TASK_SIZE)
96002 + goto out;
96003 +
96004 error = 0;
96005 if (end == start)
96006 goto out;
96007 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
96008 index 8aeba53..b4a4198 100644
96009 --- a/mm/memory-failure.c
96010 +++ b/mm/memory-failure.c
96011 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
96012
96013 int sysctl_memory_failure_recovery __read_mostly = 1;
96014
96015 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
96016 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
96017
96018 /*
96019 * Send all the processes who have the page mapped an ``action optional''
96020 @@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
96021 si.si_signo = SIGBUS;
96022 si.si_errno = 0;
96023 si.si_code = BUS_MCEERR_AO;
96024 - si.si_addr = (void *)addr;
96025 + si.si_addr = (void __user *)addr;
96026 #ifdef __ARCH_SI_TRAPNO
96027 si.si_trapno = trapno;
96028 #endif
96029 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
96030 return 0;
96031 }
96032
96033 - atomic_long_add(1, &mce_bad_pages);
96034 + atomic_long_add_unchecked(1, &mce_bad_pages);
96035
96036 /*
96037 * We need/can do nothing about count=0 pages.
96038 diff --git a/mm/memory.c b/mm/memory.c
96039 index 6c836d3..48f3264 100644
96040 --- a/mm/memory.c
96041 +++ b/mm/memory.c
96042 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96043 return;
96044
96045 pmd = pmd_offset(pud, start);
96046 +
96047 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
96048 pud_clear(pud);
96049 pmd_free_tlb(tlb, pmd, start);
96050 +#endif
96051 +
96052 }
96053
96054 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96055 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96056 if (end - 1 > ceiling - 1)
96057 return;
96058
96059 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
96060 pud = pud_offset(pgd, start);
96061 pgd_clear(pgd);
96062 pud_free_tlb(tlb, pud, start);
96063 +#endif
96064 +
96065 }
96066
96067 /*
96068 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96069 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
96070 i = 0;
96071
96072 - do {
96073 + while (nr_pages) {
96074 struct vm_area_struct *vma;
96075
96076 - vma = find_extend_vma(mm, start);
96077 + vma = find_vma(mm, start);
96078 if (!vma && in_gate_area(tsk, start)) {
96079 unsigned long pg = start & PAGE_MASK;
96080 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
96081 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96082 continue;
96083 }
96084
96085 - if (!vma ||
96086 + if (!vma || start < vma->vm_start ||
96087 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
96088 !(vm_flags & vma->vm_flags))
96089 return i ? : -EFAULT;
96090 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96091 start += PAGE_SIZE;
96092 nr_pages--;
96093 } while (nr_pages && start < vma->vm_end);
96094 - } while (nr_pages);
96095 + }
96096 return i;
96097 }
96098
96099 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
96100 page_add_file_rmap(page);
96101 set_pte_at(mm, addr, pte, mk_pte(page, prot));
96102
96103 +#ifdef CONFIG_PAX_SEGMEXEC
96104 + pax_mirror_file_pte(vma, addr, page, ptl);
96105 +#endif
96106 +
96107 retval = 0;
96108 pte_unmap_unlock(pte, ptl);
96109 return retval;
96110 @@ -1560,10 +1571,22 @@ out:
96111 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
96112 struct page *page)
96113 {
96114 +
96115 +#ifdef CONFIG_PAX_SEGMEXEC
96116 + struct vm_area_struct *vma_m;
96117 +#endif
96118 +
96119 if (addr < vma->vm_start || addr >= vma->vm_end)
96120 return -EFAULT;
96121 if (!page_count(page))
96122 return -EINVAL;
96123 +
96124 +#ifdef CONFIG_PAX_SEGMEXEC
96125 + vma_m = pax_find_mirror_vma(vma);
96126 + if (vma_m)
96127 + vma_m->vm_flags |= VM_INSERTPAGE;
96128 +#endif
96129 +
96130 vma->vm_flags |= VM_INSERTPAGE;
96131 return insert_page(vma, addr, page, vma->vm_page_prot);
96132 }
96133 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
96134 unsigned long pfn)
96135 {
96136 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
96137 + BUG_ON(vma->vm_mirror);
96138
96139 if (addr < vma->vm_start || addr >= vma->vm_end)
96140 return -EFAULT;
96141 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
96142 copy_user_highpage(dst, src, va, vma);
96143 }
96144
96145 +#ifdef CONFIG_PAX_SEGMEXEC
96146 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
96147 +{
96148 + struct mm_struct *mm = vma->vm_mm;
96149 + spinlock_t *ptl;
96150 + pte_t *pte, entry;
96151 +
96152 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
96153 + entry = *pte;
96154 + if (!pte_present(entry)) {
96155 + if (!pte_none(entry)) {
96156 + BUG_ON(pte_file(entry));
96157 + free_swap_and_cache(pte_to_swp_entry(entry));
96158 + pte_clear_not_present_full(mm, address, pte, 0);
96159 + }
96160 + } else {
96161 + struct page *page;
96162 +
96163 + flush_cache_page(vma, address, pte_pfn(entry));
96164 + entry = ptep_clear_flush(vma, address, pte);
96165 + BUG_ON(pte_dirty(entry));
96166 + page = vm_normal_page(vma, address, entry);
96167 + if (page) {
96168 + update_hiwater_rss(mm);
96169 + if (PageAnon(page))
96170 + dec_mm_counter(mm, anon_rss);
96171 + else
96172 + dec_mm_counter(mm, file_rss);
96173 + page_remove_rmap(page);
96174 + page_cache_release(page);
96175 + }
96176 + }
96177 + pte_unmap_unlock(pte, ptl);
96178 +}
96179 +
96180 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
96181 + *
96182 + * the ptl of the lower mapped page is held on entry and is not released on exit
96183 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
96184 + */
96185 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96186 +{
96187 + struct mm_struct *mm = vma->vm_mm;
96188 + unsigned long address_m;
96189 + spinlock_t *ptl_m;
96190 + struct vm_area_struct *vma_m;
96191 + pmd_t *pmd_m;
96192 + pte_t *pte_m, entry_m;
96193 +
96194 + BUG_ON(!page_m || !PageAnon(page_m));
96195 +
96196 + vma_m = pax_find_mirror_vma(vma);
96197 + if (!vma_m)
96198 + return;
96199 +
96200 + BUG_ON(!PageLocked(page_m));
96201 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96202 + address_m = address + SEGMEXEC_TASK_SIZE;
96203 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96204 + pte_m = pte_offset_map_nested(pmd_m, address_m);
96205 + ptl_m = pte_lockptr(mm, pmd_m);
96206 + if (ptl != ptl_m) {
96207 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96208 + if (!pte_none(*pte_m))
96209 + goto out;
96210 + }
96211 +
96212 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96213 + page_cache_get(page_m);
96214 + page_add_anon_rmap(page_m, vma_m, address_m);
96215 + inc_mm_counter(mm, anon_rss);
96216 + set_pte_at(mm, address_m, pte_m, entry_m);
96217 + update_mmu_cache(vma_m, address_m, entry_m);
96218 +out:
96219 + if (ptl != ptl_m)
96220 + spin_unlock(ptl_m);
96221 + pte_unmap_nested(pte_m);
96222 + unlock_page(page_m);
96223 +}
96224 +
96225 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96226 +{
96227 + struct mm_struct *mm = vma->vm_mm;
96228 + unsigned long address_m;
96229 + spinlock_t *ptl_m;
96230 + struct vm_area_struct *vma_m;
96231 + pmd_t *pmd_m;
96232 + pte_t *pte_m, entry_m;
96233 +
96234 + BUG_ON(!page_m || PageAnon(page_m));
96235 +
96236 + vma_m = pax_find_mirror_vma(vma);
96237 + if (!vma_m)
96238 + return;
96239 +
96240 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96241 + address_m = address + SEGMEXEC_TASK_SIZE;
96242 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96243 + pte_m = pte_offset_map_nested(pmd_m, address_m);
96244 + ptl_m = pte_lockptr(mm, pmd_m);
96245 + if (ptl != ptl_m) {
96246 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96247 + if (!pte_none(*pte_m))
96248 + goto out;
96249 + }
96250 +
96251 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96252 + page_cache_get(page_m);
96253 + page_add_file_rmap(page_m);
96254 + inc_mm_counter(mm, file_rss);
96255 + set_pte_at(mm, address_m, pte_m, entry_m);
96256 + update_mmu_cache(vma_m, address_m, entry_m);
96257 +out:
96258 + if (ptl != ptl_m)
96259 + spin_unlock(ptl_m);
96260 + pte_unmap_nested(pte_m);
96261 +}
96262 +
96263 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
96264 +{
96265 + struct mm_struct *mm = vma->vm_mm;
96266 + unsigned long address_m;
96267 + spinlock_t *ptl_m;
96268 + struct vm_area_struct *vma_m;
96269 + pmd_t *pmd_m;
96270 + pte_t *pte_m, entry_m;
96271 +
96272 + vma_m = pax_find_mirror_vma(vma);
96273 + if (!vma_m)
96274 + return;
96275 +
96276 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96277 + address_m = address + SEGMEXEC_TASK_SIZE;
96278 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96279 + pte_m = pte_offset_map_nested(pmd_m, address_m);
96280 + ptl_m = pte_lockptr(mm, pmd_m);
96281 + if (ptl != ptl_m) {
96282 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96283 + if (!pte_none(*pte_m))
96284 + goto out;
96285 + }
96286 +
96287 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
96288 + set_pte_at(mm, address_m, pte_m, entry_m);
96289 +out:
96290 + if (ptl != ptl_m)
96291 + spin_unlock(ptl_m);
96292 + pte_unmap_nested(pte_m);
96293 +}
96294 +
96295 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
96296 +{
96297 + struct page *page_m;
96298 + pte_t entry;
96299 +
96300 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
96301 + goto out;
96302 +
96303 + entry = *pte;
96304 + page_m = vm_normal_page(vma, address, entry);
96305 + if (!page_m)
96306 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
96307 + else if (PageAnon(page_m)) {
96308 + if (pax_find_mirror_vma(vma)) {
96309 + pte_unmap_unlock(pte, ptl);
96310 + lock_page(page_m);
96311 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
96312 + if (pte_same(entry, *pte))
96313 + pax_mirror_anon_pte(vma, address, page_m, ptl);
96314 + else
96315 + unlock_page(page_m);
96316 + }
96317 + } else
96318 + pax_mirror_file_pte(vma, address, page_m, ptl);
96319 +
96320 +out:
96321 + pte_unmap_unlock(pte, ptl);
96322 +}
96323 +#endif
96324 +
96325 /*
96326 * This routine handles present pages, when users try to write
96327 * to a shared page. It is done by copying the page to a new address
96328 @@ -2156,6 +2360,12 @@ gotten:
96329 */
96330 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96331 if (likely(pte_same(*page_table, orig_pte))) {
96332 +
96333 +#ifdef CONFIG_PAX_SEGMEXEC
96334 + if (pax_find_mirror_vma(vma))
96335 + BUG_ON(!trylock_page(new_page));
96336 +#endif
96337 +
96338 if (old_page) {
96339 if (!PageAnon(old_page)) {
96340 dec_mm_counter(mm, file_rss);
96341 @@ -2207,6 +2417,10 @@ gotten:
96342 page_remove_rmap(old_page);
96343 }
96344
96345 +#ifdef CONFIG_PAX_SEGMEXEC
96346 + pax_mirror_anon_pte(vma, address, new_page, ptl);
96347 +#endif
96348 +
96349 /* Free the old page.. */
96350 new_page = old_page;
96351 ret |= VM_FAULT_WRITE;
96352 @@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96353 swap_free(entry);
96354 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
96355 try_to_free_swap(page);
96356 +
96357 +#ifdef CONFIG_PAX_SEGMEXEC
96358 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
96359 +#endif
96360 +
96361 unlock_page(page);
96362
96363 if (flags & FAULT_FLAG_WRITE) {
96364 @@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96365
96366 /* No need to invalidate - it was non-present before */
96367 update_mmu_cache(vma, address, pte);
96368 +
96369 +#ifdef CONFIG_PAX_SEGMEXEC
96370 + pax_mirror_anon_pte(vma, address, page, ptl);
96371 +#endif
96372 +
96373 unlock:
96374 pte_unmap_unlock(page_table, ptl);
96375 out:
96376 @@ -2632,40 +2856,6 @@ out_release:
96377 }
96378
96379 /*
96380 - * This is like a special single-page "expand_{down|up}wards()",
96381 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
96382 - * doesn't hit another vma.
96383 - */
96384 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
96385 -{
96386 - address &= PAGE_MASK;
96387 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
96388 - struct vm_area_struct *prev = vma->vm_prev;
96389 -
96390 - /*
96391 - * Is there a mapping abutting this one below?
96392 - *
96393 - * That's only ok if it's the same stack mapping
96394 - * that has gotten split..
96395 - */
96396 - if (prev && prev->vm_end == address)
96397 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
96398 -
96399 - expand_stack(vma, address - PAGE_SIZE);
96400 - }
96401 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
96402 - struct vm_area_struct *next = vma->vm_next;
96403 -
96404 - /* As VM_GROWSDOWN but s/below/above/ */
96405 - if (next && next->vm_start == address + PAGE_SIZE)
96406 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
96407 -
96408 - expand_upwards(vma, address + PAGE_SIZE);
96409 - }
96410 - return 0;
96411 -}
96412 -
96413 -/*
96414 * We enter with non-exclusive mmap_sem (to exclude vma changes,
96415 * but allow concurrent faults), and pte mapped but not yet locked.
96416 * We return with mmap_sem still held, but pte unmapped and unlocked.
96417 @@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96418 unsigned long address, pte_t *page_table, pmd_t *pmd,
96419 unsigned int flags)
96420 {
96421 - struct page *page;
96422 + struct page *page = NULL;
96423 spinlock_t *ptl;
96424 pte_t entry;
96425
96426 - pte_unmap(page_table);
96427 -
96428 - /* Check if we need to add a guard page to the stack */
96429 - if (check_stack_guard_page(vma, address) < 0)
96430 - return VM_FAULT_SIGBUS;
96431 -
96432 - /* Use the zero-page for reads */
96433 if (!(flags & FAULT_FLAG_WRITE)) {
96434 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
96435 vma->vm_page_prot));
96436 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96437 + ptl = pte_lockptr(mm, pmd);
96438 + spin_lock(ptl);
96439 if (!pte_none(*page_table))
96440 goto unlock;
96441 goto setpte;
96442 }
96443
96444 /* Allocate our own private page. */
96445 + pte_unmap(page_table);
96446 +
96447 if (unlikely(anon_vma_prepare(vma)))
96448 goto oom;
96449 page = alloc_zeroed_user_highpage_movable(vma, address);
96450 @@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96451 if (!pte_none(*page_table))
96452 goto release;
96453
96454 +#ifdef CONFIG_PAX_SEGMEXEC
96455 + if (pax_find_mirror_vma(vma))
96456 + BUG_ON(!trylock_page(page));
96457 +#endif
96458 +
96459 inc_mm_counter(mm, anon_rss);
96460 page_add_new_anon_rmap(page, vma, address);
96461 setpte:
96462 @@ -2720,6 +2911,12 @@ setpte:
96463
96464 /* No need to invalidate - it was non-present before */
96465 update_mmu_cache(vma, address, entry);
96466 +
96467 +#ifdef CONFIG_PAX_SEGMEXEC
96468 + if (page)
96469 + pax_mirror_anon_pte(vma, address, page, ptl);
96470 +#endif
96471 +
96472 unlock:
96473 pte_unmap_unlock(page_table, ptl);
96474 return 0;
96475 @@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96476 */
96477 /* Only go through if we didn't race with anybody else... */
96478 if (likely(pte_same(*page_table, orig_pte))) {
96479 +
96480 +#ifdef CONFIG_PAX_SEGMEXEC
96481 + if (anon && pax_find_mirror_vma(vma))
96482 + BUG_ON(!trylock_page(page));
96483 +#endif
96484 +
96485 flush_icache_page(vma, page);
96486 entry = mk_pte(page, vma->vm_page_prot);
96487 if (flags & FAULT_FLAG_WRITE)
96488 @@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96489
96490 /* no need to invalidate: a not-present page won't be cached */
96491 update_mmu_cache(vma, address, entry);
96492 +
96493 +#ifdef CONFIG_PAX_SEGMEXEC
96494 + if (anon)
96495 + pax_mirror_anon_pte(vma, address, page, ptl);
96496 + else
96497 + pax_mirror_file_pte(vma, address, page, ptl);
96498 +#endif
96499 +
96500 } else {
96501 if (charged)
96502 mem_cgroup_uncharge_page(page);
96503 @@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
96504 if (flags & FAULT_FLAG_WRITE)
96505 flush_tlb_page(vma, address);
96506 }
96507 +
96508 +#ifdef CONFIG_PAX_SEGMEXEC
96509 + pax_mirror_pte(vma, address, pte, pmd, ptl);
96510 + return 0;
96511 +#endif
96512 +
96513 unlock:
96514 pte_unmap_unlock(pte, ptl);
96515 return 0;
96516 @@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96517 pmd_t *pmd;
96518 pte_t *pte;
96519
96520 +#ifdef CONFIG_PAX_SEGMEXEC
96521 + struct vm_area_struct *vma_m;
96522 +#endif
96523 +
96524 __set_current_state(TASK_RUNNING);
96525
96526 count_vm_event(PGFAULT);
96527 @@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96528 if (unlikely(is_vm_hugetlb_page(vma)))
96529 return hugetlb_fault(mm, vma, address, flags);
96530
96531 +#ifdef CONFIG_PAX_SEGMEXEC
96532 + vma_m = pax_find_mirror_vma(vma);
96533 + if (vma_m) {
96534 + unsigned long address_m;
96535 + pgd_t *pgd_m;
96536 + pud_t *pud_m;
96537 + pmd_t *pmd_m;
96538 +
96539 + if (vma->vm_start > vma_m->vm_start) {
96540 + address_m = address;
96541 + address -= SEGMEXEC_TASK_SIZE;
96542 + vma = vma_m;
96543 + } else
96544 + address_m = address + SEGMEXEC_TASK_SIZE;
96545 +
96546 + pgd_m = pgd_offset(mm, address_m);
96547 + pud_m = pud_alloc(mm, pgd_m, address_m);
96548 + if (!pud_m)
96549 + return VM_FAULT_OOM;
96550 + pmd_m = pmd_alloc(mm, pud_m, address_m);
96551 + if (!pmd_m)
96552 + return VM_FAULT_OOM;
96553 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
96554 + return VM_FAULT_OOM;
96555 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
96556 + }
96557 +#endif
96558 +
96559 pgd = pgd_offset(mm, address);
96560 pud = pud_alloc(mm, pgd, address);
96561 if (!pud)
96562 @@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
96563 gate_vma.vm_start = FIXADDR_USER_START;
96564 gate_vma.vm_end = FIXADDR_USER_END;
96565 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
96566 - gate_vma.vm_page_prot = __P101;
96567 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
96568 /*
96569 * Make sure the vDSO gets into every core dump.
96570 * Dumping its contents makes post-mortem fully interpretable later
96571 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
96572 index 3c6e3e2..b1ddbb8 100644
96573 --- a/mm/mempolicy.c
96574 +++ b/mm/mempolicy.c
96575 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
96576 struct vm_area_struct *next;
96577 int err;
96578
96579 +#ifdef CONFIG_PAX_SEGMEXEC
96580 + struct vm_area_struct *vma_m;
96581 +#endif
96582 +
96583 err = 0;
96584 for (; vma && vma->vm_start < end; vma = next) {
96585 next = vma->vm_next;
96586 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
96587 err = policy_vma(vma, new);
96588 if (err)
96589 break;
96590 +
96591 +#ifdef CONFIG_PAX_SEGMEXEC
96592 + vma_m = pax_find_mirror_vma(vma);
96593 + if (vma_m) {
96594 + err = policy_vma(vma_m, new);
96595 + if (err)
96596 + break;
96597 + }
96598 +#endif
96599 +
96600 }
96601 return err;
96602 }
96603 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
96604
96605 if (end < start)
96606 return -EINVAL;
96607 +
96608 +#ifdef CONFIG_PAX_SEGMEXEC
96609 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
96610 + if (end > SEGMEXEC_TASK_SIZE)
96611 + return -EINVAL;
96612 + } else
96613 +#endif
96614 +
96615 + if (end > TASK_SIZE)
96616 + return -EINVAL;
96617 +
96618 if (end == start)
96619 return 0;
96620
96621 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96622 if (!mm)
96623 return -EINVAL;
96624
96625 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96626 + if (mm != current->mm &&
96627 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96628 + err = -EPERM;
96629 + goto out;
96630 + }
96631 +#endif
96632 +
96633 /*
96634 * Check if this process has the right to modify the specified
96635 * process. The right exists if the process has administrative
96636 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96637 rcu_read_lock();
96638 tcred = __task_cred(task);
96639 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
96640 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
96641 - !capable(CAP_SYS_NICE)) {
96642 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
96643 rcu_read_unlock();
96644 err = -EPERM;
96645 goto out;
96646 @@ -2367,6 +2399,12 @@ static inline void check_huge_range(struct vm_area_struct *vma,
96647 }
96648 #endif
96649
96650 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96651 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
96652 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
96653 + _mm->pax_flags & MF_PAX_SEGMEXEC))
96654 +#endif
96655 +
96656 /*
96657 * Display pages allocated per node and memory policy via /proc.
96658 */
96659 @@ -2381,6 +2419,13 @@ int show_numa_map(struct seq_file *m, void *v)
96660 int n;
96661 char buffer[50];
96662
96663 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96664 + if (current->exec_id != m->exec_id) {
96665 + gr_log_badprocpid("numa_maps");
96666 + return 0;
96667 + }
96668 +#endif
96669 +
96670 if (!mm)
96671 return 0;
96672
96673 @@ -2392,11 +2437,15 @@ int show_numa_map(struct seq_file *m, void *v)
96674 mpol_to_str(buffer, sizeof(buffer), pol, 0);
96675 mpol_cond_put(pol);
96676
96677 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96678 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
96679 +#else
96680 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
96681 +#endif
96682
96683 if (file) {
96684 seq_printf(m, " file=");
96685 - seq_path(m, &file->f_path, "\n\t= ");
96686 + seq_path(m, &file->f_path, "\n\t\\= ");
96687 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
96688 seq_printf(m, " heap");
96689 } else if (vma->vm_start <= mm->start_stack &&
96690 diff --git a/mm/migrate.c b/mm/migrate.c
96691 index aaca868..2ebecdc 100644
96692 --- a/mm/migrate.c
96693 +++ b/mm/migrate.c
96694 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
96695 unsigned long chunk_start;
96696 int err;
96697
96698 + pax_track_stack();
96699 +
96700 task_nodes = cpuset_mems_allowed(task);
96701
96702 err = -ENOMEM;
96703 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96704 if (!mm)
96705 return -EINVAL;
96706
96707 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96708 + if (mm != current->mm &&
96709 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96710 + err = -EPERM;
96711 + goto out;
96712 + }
96713 +#endif
96714 +
96715 /*
96716 * Check if this process has the right to modify the specified
96717 * process. The right exists if the process has administrative
96718 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96719 rcu_read_lock();
96720 tcred = __task_cred(task);
96721 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
96722 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
96723 - !capable(CAP_SYS_NICE)) {
96724 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
96725 rcu_read_unlock();
96726 err = -EPERM;
96727 goto out;
96728 diff --git a/mm/mlock.c b/mm/mlock.c
96729 index 2d846cf..98134d2 100644
96730 --- a/mm/mlock.c
96731 +++ b/mm/mlock.c
96732 @@ -13,6 +13,7 @@
96733 #include <linux/pagemap.h>
96734 #include <linux/mempolicy.h>
96735 #include <linux/syscalls.h>
96736 +#include <linux/security.h>
96737 #include <linux/sched.h>
96738 #include <linux/module.h>
96739 #include <linux/rmap.h>
96740 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
96741 }
96742 }
96743
96744 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
96745 -{
96746 - return (vma->vm_flags & VM_GROWSDOWN) &&
96747 - (vma->vm_start == addr) &&
96748 - !vma_stack_continue(vma->vm_prev, addr);
96749 -}
96750 -
96751 /**
96752 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
96753 * @vma: target vma
96754 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
96755 if (vma->vm_flags & VM_WRITE)
96756 gup_flags |= FOLL_WRITE;
96757
96758 - /* We don't try to access the guard page of a stack vma */
96759 - if (stack_guard_page(vma, start)) {
96760 - addr += PAGE_SIZE;
96761 - nr_pages--;
96762 - }
96763 -
96764 while (nr_pages > 0) {
96765 int i;
96766
96767 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
96768 {
96769 unsigned long nstart, end, tmp;
96770 struct vm_area_struct * vma, * prev;
96771 - int error;
96772 + int error = -EINVAL;
96773
96774 len = PAGE_ALIGN(len);
96775 end = start + len;
96776 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
96777 return -EINVAL;
96778 if (end == start)
96779 return 0;
96780 + if (end > TASK_SIZE)
96781 + return -EINVAL;
96782 +
96783 vma = find_vma_prev(current->mm, start, &prev);
96784 if (!vma || vma->vm_start > start)
96785 return -ENOMEM;
96786 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
96787 for (nstart = start ; ; ) {
96788 unsigned int newflags;
96789
96790 +#ifdef CONFIG_PAX_SEGMEXEC
96791 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96792 + break;
96793 +#endif
96794 +
96795 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
96796
96797 newflags = vma->vm_flags | VM_LOCKED;
96798 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
96799 lock_limit >>= PAGE_SHIFT;
96800
96801 /* check against resource limits */
96802 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
96803 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
96804 error = do_mlock(start, len, 1);
96805 up_write(&current->mm->mmap_sem);
96806 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
96807 static int do_mlockall(int flags)
96808 {
96809 struct vm_area_struct * vma, * prev = NULL;
96810 - unsigned int def_flags = 0;
96811
96812 if (flags & MCL_FUTURE)
96813 - def_flags = VM_LOCKED;
96814 - current->mm->def_flags = def_flags;
96815 + current->mm->def_flags |= VM_LOCKED;
96816 + else
96817 + current->mm->def_flags &= ~VM_LOCKED;
96818 if (flags == MCL_FUTURE)
96819 goto out;
96820
96821 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
96822 - unsigned int newflags;
96823 + unsigned long newflags;
96824
96825 +#ifdef CONFIG_PAX_SEGMEXEC
96826 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96827 + break;
96828 +#endif
96829 +
96830 + BUG_ON(vma->vm_end > TASK_SIZE);
96831 newflags = vma->vm_flags | VM_LOCKED;
96832 if (!(flags & MCL_CURRENT))
96833 newflags &= ~VM_LOCKED;
96834 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
96835 lock_limit >>= PAGE_SHIFT;
96836
96837 ret = -ENOMEM;
96838 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
96839 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
96840 capable(CAP_IPC_LOCK))
96841 ret = do_mlockall(flags);
96842 diff --git a/mm/mmap.c b/mm/mmap.c
96843 index 4b80cbf..12a7861 100644
96844 --- a/mm/mmap.c
96845 +++ b/mm/mmap.c
96846 @@ -45,6 +45,16 @@
96847 #define arch_rebalance_pgtables(addr, len) (addr)
96848 #endif
96849
96850 +static inline void verify_mm_writelocked(struct mm_struct *mm)
96851 +{
96852 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
96853 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
96854 + up_read(&mm->mmap_sem);
96855 + BUG();
96856 + }
96857 +#endif
96858 +}
96859 +
96860 static void unmap_region(struct mm_struct *mm,
96861 struct vm_area_struct *vma, struct vm_area_struct *prev,
96862 unsigned long start, unsigned long end);
96863 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
96864 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
96865 *
96866 */
96867 -pgprot_t protection_map[16] = {
96868 +pgprot_t protection_map[16] __read_only = {
96869 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
96870 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
96871 };
96872
96873 pgprot_t vm_get_page_prot(unsigned long vm_flags)
96874 {
96875 - return __pgprot(pgprot_val(protection_map[vm_flags &
96876 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
96877 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
96878 pgprot_val(arch_vm_get_page_prot(vm_flags)));
96879 +
96880 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96881 + if (!nx_enabled &&
96882 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
96883 + (vm_flags & (VM_READ | VM_WRITE)))
96884 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
96885 +#endif
96886 +
96887 + return prot;
96888 }
96889 EXPORT_SYMBOL(vm_get_page_prot);
96890
96891 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
96892 int sysctl_overcommit_ratio = 50; /* default is 50% */
96893 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
96894 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
96895 struct percpu_counter vm_committed_as;
96896
96897 /*
96898 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
96899 struct vm_area_struct *next = vma->vm_next;
96900
96901 might_sleep();
96902 + BUG_ON(vma->vm_mirror);
96903 if (vma->vm_ops && vma->vm_ops->close)
96904 vma->vm_ops->close(vma);
96905 if (vma->vm_file) {
96906 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
96907 * not page aligned -Ram Gupta
96908 */
96909 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
96910 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
96911 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
96912 (mm->end_data - mm->start_data) > rlim)
96913 goto out;
96914 @@ -704,6 +726,12 @@ static int
96915 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
96916 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96917 {
96918 +
96919 +#ifdef CONFIG_PAX_SEGMEXEC
96920 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
96921 + return 0;
96922 +#endif
96923 +
96924 if (is_mergeable_vma(vma, file, vm_flags) &&
96925 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
96926 if (vma->vm_pgoff == vm_pgoff)
96927 @@ -723,6 +751,12 @@ static int
96928 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96929 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96930 {
96931 +
96932 +#ifdef CONFIG_PAX_SEGMEXEC
96933 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
96934 + return 0;
96935 +#endif
96936 +
96937 if (is_mergeable_vma(vma, file, vm_flags) &&
96938 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
96939 pgoff_t vm_pglen;
96940 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96941 struct vm_area_struct *vma_merge(struct mm_struct *mm,
96942 struct vm_area_struct *prev, unsigned long addr,
96943 unsigned long end, unsigned long vm_flags,
96944 - struct anon_vma *anon_vma, struct file *file,
96945 + struct anon_vma *anon_vma, struct file *file,
96946 pgoff_t pgoff, struct mempolicy *policy)
96947 {
96948 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
96949 struct vm_area_struct *area, *next;
96950
96951 +#ifdef CONFIG_PAX_SEGMEXEC
96952 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
96953 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
96954 +
96955 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
96956 +#endif
96957 +
96958 /*
96959 * We later require that vma->vm_flags == vm_flags,
96960 * so this tests vma->vm_flags & VM_SPECIAL, too.
96961 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96962 if (next && next->vm_end == end) /* cases 6, 7, 8 */
96963 next = next->vm_next;
96964
96965 +#ifdef CONFIG_PAX_SEGMEXEC
96966 + if (prev)
96967 + prev_m = pax_find_mirror_vma(prev);
96968 + if (area)
96969 + area_m = pax_find_mirror_vma(area);
96970 + if (next)
96971 + next_m = pax_find_mirror_vma(next);
96972 +#endif
96973 +
96974 /*
96975 * Can it merge with the predecessor?
96976 */
96977 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96978 /* cases 1, 6 */
96979 vma_adjust(prev, prev->vm_start,
96980 next->vm_end, prev->vm_pgoff, NULL);
96981 - } else /* cases 2, 5, 7 */
96982 +
96983 +#ifdef CONFIG_PAX_SEGMEXEC
96984 + if (prev_m)
96985 + vma_adjust(prev_m, prev_m->vm_start,
96986 + next_m->vm_end, prev_m->vm_pgoff, NULL);
96987 +#endif
96988 +
96989 + } else { /* cases 2, 5, 7 */
96990 vma_adjust(prev, prev->vm_start,
96991 end, prev->vm_pgoff, NULL);
96992 +
96993 +#ifdef CONFIG_PAX_SEGMEXEC
96994 + if (prev_m)
96995 + vma_adjust(prev_m, prev_m->vm_start,
96996 + end_m, prev_m->vm_pgoff, NULL);
96997 +#endif
96998 +
96999 + }
97000 return prev;
97001 }
97002
97003 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97004 mpol_equal(policy, vma_policy(next)) &&
97005 can_vma_merge_before(next, vm_flags,
97006 anon_vma, file, pgoff+pglen)) {
97007 - if (prev && addr < prev->vm_end) /* case 4 */
97008 + if (prev && addr < prev->vm_end) { /* case 4 */
97009 vma_adjust(prev, prev->vm_start,
97010 addr, prev->vm_pgoff, NULL);
97011 - else /* cases 3, 8 */
97012 +
97013 +#ifdef CONFIG_PAX_SEGMEXEC
97014 + if (prev_m)
97015 + vma_adjust(prev_m, prev_m->vm_start,
97016 + addr_m, prev_m->vm_pgoff, NULL);
97017 +#endif
97018 +
97019 + } else { /* cases 3, 8 */
97020 vma_adjust(area, addr, next->vm_end,
97021 next->vm_pgoff - pglen, NULL);
97022 +
97023 +#ifdef CONFIG_PAX_SEGMEXEC
97024 + if (area_m)
97025 + vma_adjust(area_m, addr_m, next_m->vm_end,
97026 + next_m->vm_pgoff - pglen, NULL);
97027 +#endif
97028 +
97029 + }
97030 return area;
97031 }
97032
97033 @@ -898,14 +978,11 @@ none:
97034 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97035 struct file *file, long pages)
97036 {
97037 - const unsigned long stack_flags
97038 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
97039 -
97040 if (file) {
97041 mm->shared_vm += pages;
97042 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
97043 mm->exec_vm += pages;
97044 - } else if (flags & stack_flags)
97045 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
97046 mm->stack_vm += pages;
97047 if (flags & (VM_RESERVED|VM_IO))
97048 mm->reserved_vm += pages;
97049 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97050 * (the exception is when the underlying filesystem is noexec
97051 * mounted, in which case we dont add PROT_EXEC.)
97052 */
97053 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
97054 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
97055 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
97056 prot |= PROT_EXEC;
97057
97058 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97059 /* Obtain the address to map to. we verify (or select) it and ensure
97060 * that it represents a valid section of the address space.
97061 */
97062 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
97063 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
97064 if (addr & ~PAGE_MASK)
97065 return addr;
97066
97067 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97068 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
97069 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
97070
97071 +#ifdef CONFIG_PAX_MPROTECT
97072 + if (mm->pax_flags & MF_PAX_MPROTECT) {
97073 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
97074 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
97075 + gr_log_rwxmmap(file);
97076 +
97077 +#ifdef CONFIG_PAX_EMUPLT
97078 + vm_flags &= ~VM_EXEC;
97079 +#else
97080 + return -EPERM;
97081 +#endif
97082 +
97083 + }
97084 +
97085 + if (!(vm_flags & VM_EXEC))
97086 + vm_flags &= ~VM_MAYEXEC;
97087 +#else
97088 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97089 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97090 +#endif
97091 + else
97092 + vm_flags &= ~VM_MAYWRITE;
97093 + }
97094 +#endif
97095 +
97096 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97097 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
97098 + vm_flags &= ~VM_PAGEEXEC;
97099 +#endif
97100 +
97101 if (flags & MAP_LOCKED)
97102 if (!can_do_mlock())
97103 return -EPERM;
97104 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97105 locked += mm->locked_vm;
97106 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
97107 lock_limit >>= PAGE_SHIFT;
97108 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97109 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
97110 return -EAGAIN;
97111 }
97112 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97113 if (error)
97114 return error;
97115
97116 + if (!gr_acl_handle_mmap(file, prot))
97117 + return -EACCES;
97118 +
97119 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
97120 }
97121 EXPORT_SYMBOL(do_mmap_pgoff);
97122 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
97123 */
97124 int vma_wants_writenotify(struct vm_area_struct *vma)
97125 {
97126 - unsigned int vm_flags = vma->vm_flags;
97127 + unsigned long vm_flags = vma->vm_flags;
97128
97129 /* If it was private or non-writable, the write bit is already clear */
97130 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
97131 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
97132 return 0;
97133
97134 /* The backer wishes to know when pages are first written to? */
97135 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97136 unsigned long charged = 0;
97137 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
97138
97139 +#ifdef CONFIG_PAX_SEGMEXEC
97140 + struct vm_area_struct *vma_m = NULL;
97141 +#endif
97142 +
97143 + /*
97144 + * mm->mmap_sem is required to protect against another thread
97145 + * changing the mappings in case we sleep.
97146 + */
97147 + verify_mm_writelocked(mm);
97148 +
97149 /* Clear old maps */
97150 error = -ENOMEM;
97151 -munmap_back:
97152 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
97153 if (vma && vma->vm_start < addr + len) {
97154 if (do_munmap(mm, addr, len))
97155 return -ENOMEM;
97156 - goto munmap_back;
97157 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
97158 + BUG_ON(vma && vma->vm_start < addr + len);
97159 }
97160
97161 /* Check against address space limit. */
97162 @@ -1173,6 +1294,16 @@ munmap_back:
97163 goto unacct_error;
97164 }
97165
97166 +#ifdef CONFIG_PAX_SEGMEXEC
97167 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
97168 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97169 + if (!vma_m) {
97170 + error = -ENOMEM;
97171 + goto free_vma;
97172 + }
97173 + }
97174 +#endif
97175 +
97176 vma->vm_mm = mm;
97177 vma->vm_start = addr;
97178 vma->vm_end = addr + len;
97179 @@ -1180,8 +1311,9 @@ munmap_back:
97180 vma->vm_page_prot = vm_get_page_prot(vm_flags);
97181 vma->vm_pgoff = pgoff;
97182
97183 + error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
97184 +
97185 if (file) {
97186 - error = -EINVAL;
97187 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
97188 goto free_vma;
97189 if (vm_flags & VM_DENYWRITE) {
97190 @@ -1195,6 +1327,19 @@ munmap_back:
97191 error = file->f_op->mmap(file, vma);
97192 if (error)
97193 goto unmap_and_free_vma;
97194 +
97195 +#ifdef CONFIG_PAX_SEGMEXEC
97196 + if (vma_m && (vm_flags & VM_EXECUTABLE))
97197 + added_exe_file_vma(mm);
97198 +#endif
97199 +
97200 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97201 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
97202 + vma->vm_flags |= VM_PAGEEXEC;
97203 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97204 + }
97205 +#endif
97206 +
97207 if (vm_flags & VM_EXECUTABLE)
97208 added_exe_file_vma(mm);
97209
97210 @@ -1207,6 +1352,8 @@ munmap_back:
97211 pgoff = vma->vm_pgoff;
97212 vm_flags = vma->vm_flags;
97213 } else if (vm_flags & VM_SHARED) {
97214 + if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
97215 + goto free_vma;
97216 error = shmem_zero_setup(vma);
97217 if (error)
97218 goto free_vma;
97219 @@ -1218,6 +1365,11 @@ munmap_back:
97220 vma_link(mm, vma, prev, rb_link, rb_parent);
97221 file = vma->vm_file;
97222
97223 +#ifdef CONFIG_PAX_SEGMEXEC
97224 + if (vma_m)
97225 + pax_mirror_vma(vma_m, vma);
97226 +#endif
97227 +
97228 /* Once vma denies write, undo our temporary denial count */
97229 if (correct_wcount)
97230 atomic_inc(&inode->i_writecount);
97231 @@ -1226,6 +1378,7 @@ out:
97232
97233 mm->total_vm += len >> PAGE_SHIFT;
97234 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
97235 + track_exec_limit(mm, addr, addr + len, vm_flags);
97236 if (vm_flags & VM_LOCKED) {
97237 /*
97238 * makes pages present; downgrades, drops, reacquires mmap_sem
97239 @@ -1248,6 +1401,12 @@ unmap_and_free_vma:
97240 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
97241 charged = 0;
97242 free_vma:
97243 +
97244 +#ifdef CONFIG_PAX_SEGMEXEC
97245 + if (vma_m)
97246 + kmem_cache_free(vm_area_cachep, vma_m);
97247 +#endif
97248 +
97249 kmem_cache_free(vm_area_cachep, vma);
97250 unacct_error:
97251 if (charged)
97252 @@ -1255,6 +1414,44 @@ unacct_error:
97253 return error;
97254 }
97255
97256 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
97257 +{
97258 + if (!vma) {
97259 +#ifdef CONFIG_STACK_GROWSUP
97260 + if (addr > sysctl_heap_stack_gap)
97261 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
97262 + else
97263 + vma = find_vma(current->mm, 0);
97264 + if (vma && (vma->vm_flags & VM_GROWSUP))
97265 + return false;
97266 +#endif
97267 + return true;
97268 + }
97269 +
97270 + if (addr + len > vma->vm_start)
97271 + return false;
97272 +
97273 + if (vma->vm_flags & VM_GROWSDOWN)
97274 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
97275 +#ifdef CONFIG_STACK_GROWSUP
97276 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
97277 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
97278 +#endif
97279 +
97280 + return true;
97281 +}
97282 +
97283 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
97284 +{
97285 + if (vma->vm_start < len)
97286 + return -ENOMEM;
97287 + if (!(vma->vm_flags & VM_GROWSDOWN))
97288 + return vma->vm_start - len;
97289 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
97290 + return vma->vm_start - len - sysctl_heap_stack_gap;
97291 + return -ENOMEM;
97292 +}
97293 +
97294 /* Get an address range which is currently unmapped.
97295 * For shmat() with addr=0.
97296 *
97297 @@ -1281,18 +1478,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97298 if (flags & MAP_FIXED)
97299 return addr;
97300
97301 +#ifdef CONFIG_PAX_RANDMMAP
97302 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97303 +#endif
97304 +
97305 if (addr) {
97306 addr = PAGE_ALIGN(addr);
97307 - vma = find_vma(mm, addr);
97308 - if (TASK_SIZE - len >= addr &&
97309 - (!vma || addr + len <= vma->vm_start))
97310 - return addr;
97311 + if (TASK_SIZE - len >= addr) {
97312 + vma = find_vma(mm, addr);
97313 + if (check_heap_stack_gap(vma, addr, len))
97314 + return addr;
97315 + }
97316 }
97317 if (len > mm->cached_hole_size) {
97318 - start_addr = addr = mm->free_area_cache;
97319 + start_addr = addr = mm->free_area_cache;
97320 } else {
97321 - start_addr = addr = TASK_UNMAPPED_BASE;
97322 - mm->cached_hole_size = 0;
97323 + start_addr = addr = mm->mmap_base;
97324 + mm->cached_hole_size = 0;
97325 }
97326
97327 full_search:
97328 @@ -1303,34 +1505,40 @@ full_search:
97329 * Start a new search - just in case we missed
97330 * some holes.
97331 */
97332 - if (start_addr != TASK_UNMAPPED_BASE) {
97333 - addr = TASK_UNMAPPED_BASE;
97334 - start_addr = addr;
97335 + if (start_addr != mm->mmap_base) {
97336 + start_addr = addr = mm->mmap_base;
97337 mm->cached_hole_size = 0;
97338 goto full_search;
97339 }
97340 return -ENOMEM;
97341 }
97342 - if (!vma || addr + len <= vma->vm_start) {
97343 - /*
97344 - * Remember the place where we stopped the search:
97345 - */
97346 - mm->free_area_cache = addr + len;
97347 - return addr;
97348 - }
97349 + if (check_heap_stack_gap(vma, addr, len))
97350 + break;
97351 if (addr + mm->cached_hole_size < vma->vm_start)
97352 mm->cached_hole_size = vma->vm_start - addr;
97353 addr = vma->vm_end;
97354 }
97355 +
97356 + /*
97357 + * Remember the place where we stopped the search:
97358 + */
97359 + mm->free_area_cache = addr + len;
97360 + return addr;
97361 }
97362 #endif
97363
97364 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
97365 {
97366 +
97367 +#ifdef CONFIG_PAX_SEGMEXEC
97368 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
97369 + return;
97370 +#endif
97371 +
97372 /*
97373 * Is this a new hole at the lowest possible address?
97374 */
97375 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
97376 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
97377 mm->free_area_cache = addr;
97378 mm->cached_hole_size = ~0UL;
97379 }
97380 @@ -1348,7 +1556,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97381 {
97382 struct vm_area_struct *vma;
97383 struct mm_struct *mm = current->mm;
97384 - unsigned long addr = addr0;
97385 + unsigned long base = mm->mmap_base, addr = addr0;
97386
97387 /* requested length too big for entire address space */
97388 if (len > TASK_SIZE)
97389 @@ -1357,13 +1565,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97390 if (flags & MAP_FIXED)
97391 return addr;
97392
97393 +#ifdef CONFIG_PAX_RANDMMAP
97394 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97395 +#endif
97396 +
97397 /* requesting a specific address */
97398 if (addr) {
97399 addr = PAGE_ALIGN(addr);
97400 - vma = find_vma(mm, addr);
97401 - if (TASK_SIZE - len >= addr &&
97402 - (!vma || addr + len <= vma->vm_start))
97403 - return addr;
97404 + if (TASK_SIZE - len >= addr) {
97405 + vma = find_vma(mm, addr);
97406 + if (check_heap_stack_gap(vma, addr, len))
97407 + return addr;
97408 + }
97409 }
97410
97411 /* check if free_area_cache is useful for us */
97412 @@ -1378,7 +1591,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97413 /* make sure it can fit in the remaining address space */
97414 if (addr > len) {
97415 vma = find_vma(mm, addr-len);
97416 - if (!vma || addr <= vma->vm_start)
97417 + if (check_heap_stack_gap(vma, addr - len, len))
97418 /* remember the address as a hint for next time */
97419 return (mm->free_area_cache = addr-len);
97420 }
97421 @@ -1395,7 +1608,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97422 * return with success:
97423 */
97424 vma = find_vma(mm, addr);
97425 - if (!vma || addr+len <= vma->vm_start)
97426 + if (check_heap_stack_gap(vma, addr, len))
97427 /* remember the address as a hint for next time */
97428 return (mm->free_area_cache = addr);
97429
97430 @@ -1404,8 +1617,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97431 mm->cached_hole_size = vma->vm_start - addr;
97432
97433 /* try just below the current vma->vm_start */
97434 - addr = vma->vm_start-len;
97435 - } while (len < vma->vm_start);
97436 + addr = skip_heap_stack_gap(vma, len);
97437 + } while (!IS_ERR_VALUE(addr));
97438
97439 bottomup:
97440 /*
97441 @@ -1414,13 +1627,21 @@ bottomup:
97442 * can happen with large stack limits and large mmap()
97443 * allocations.
97444 */
97445 + mm->mmap_base = TASK_UNMAPPED_BASE;
97446 +
97447 +#ifdef CONFIG_PAX_RANDMMAP
97448 + if (mm->pax_flags & MF_PAX_RANDMMAP)
97449 + mm->mmap_base += mm->delta_mmap;
97450 +#endif
97451 +
97452 + mm->free_area_cache = mm->mmap_base;
97453 mm->cached_hole_size = ~0UL;
97454 - mm->free_area_cache = TASK_UNMAPPED_BASE;
97455 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
97456 /*
97457 * Restore the topdown base:
97458 */
97459 - mm->free_area_cache = mm->mmap_base;
97460 + mm->mmap_base = base;
97461 + mm->free_area_cache = base;
97462 mm->cached_hole_size = ~0UL;
97463
97464 return addr;
97465 @@ -1429,6 +1650,12 @@ bottomup:
97466
97467 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
97468 {
97469 +
97470 +#ifdef CONFIG_PAX_SEGMEXEC
97471 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
97472 + return;
97473 +#endif
97474 +
97475 /*
97476 * Is this a new hole at the highest possible address?
97477 */
97478 @@ -1436,8 +1663,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
97479 mm->free_area_cache = addr;
97480
97481 /* dont allow allocations above current base */
97482 - if (mm->free_area_cache > mm->mmap_base)
97483 + if (mm->free_area_cache > mm->mmap_base) {
97484 mm->free_area_cache = mm->mmap_base;
97485 + mm->cached_hole_size = ~0UL;
97486 + }
97487 }
97488
97489 unsigned long
97490 @@ -1510,40 +1739,49 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
97491
97492 EXPORT_SYMBOL(find_vma);
97493
97494 -/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
97495 +/*
97496 + * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
97497 + */
97498 struct vm_area_struct *
97499 find_vma_prev(struct mm_struct *mm, unsigned long addr,
97500 struct vm_area_struct **pprev)
97501 {
97502 - struct vm_area_struct *vma = NULL, *prev = NULL;
97503 - struct rb_node *rb_node;
97504 - if (!mm)
97505 - goto out;
97506 -
97507 - /* Guard against addr being lower than the first VMA */
97508 - vma = mm->mmap;
97509 -
97510 - /* Go through the RB tree quickly. */
97511 - rb_node = mm->mm_rb.rb_node;
97512 -
97513 - while (rb_node) {
97514 - struct vm_area_struct *vma_tmp;
97515 - vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
97516 -
97517 - if (addr < vma_tmp->vm_end) {
97518 - rb_node = rb_node->rb_left;
97519 - } else {
97520 - prev = vma_tmp;
97521 - if (!prev->vm_next || (addr < prev->vm_next->vm_end))
97522 - break;
97523 + struct vm_area_struct *vma;
97524 +
97525 + vma = find_vma(mm, addr);
97526 + if (vma) {
97527 + *pprev = vma->vm_prev;
97528 + } else {
97529 + struct rb_node *rb_node = mm->mm_rb.rb_node;
97530 + *pprev = NULL;
97531 + while (rb_node) {
97532 + *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
97533 rb_node = rb_node->rb_right;
97534 }
97535 }
97536 + return vma;
97537 +}
97538 +
97539 +#ifdef CONFIG_PAX_SEGMEXEC
97540 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
97541 +{
97542 + struct vm_area_struct *vma_m;
97543
97544 -out:
97545 - *pprev = prev;
97546 - return prev ? prev->vm_next : vma;
97547 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
97548 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
97549 + BUG_ON(vma->vm_mirror);
97550 + return NULL;
97551 + }
97552 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
97553 + vma_m = vma->vm_mirror;
97554 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
97555 + BUG_ON(vma->vm_file != vma_m->vm_file);
97556 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
97557 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
97558 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
97559 + return vma_m;
97560 }
97561 +#endif
97562
97563 /*
97564 * Verify that the stack growth is acceptable and
97565 @@ -1561,6 +1799,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97566 return -ENOMEM;
97567
97568 /* Stack limit test */
97569 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
97570 if (size > rlim[RLIMIT_STACK].rlim_cur)
97571 return -ENOMEM;
97572
97573 @@ -1570,6 +1809,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97574 unsigned long limit;
97575 locked = mm->locked_vm + grow;
97576 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
97577 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97578 if (locked > limit && !capable(CAP_IPC_LOCK))
97579 return -ENOMEM;
97580 }
97581 @@ -1600,37 +1840,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97582 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
97583 * vma is the last one with address > vma->vm_end. Have to extend vma.
97584 */
97585 +#ifndef CONFIG_IA64
97586 +static
97587 +#endif
97588 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97589 {
97590 int error;
97591 + bool locknext;
97592
97593 if (!(vma->vm_flags & VM_GROWSUP))
97594 return -EFAULT;
97595
97596 + /* Also guard against wrapping around to address 0. */
97597 + if (address < PAGE_ALIGN(address+1))
97598 + address = PAGE_ALIGN(address+1);
97599 + else
97600 + return -ENOMEM;
97601 +
97602 /*
97603 * We must make sure the anon_vma is allocated
97604 * so that the anon_vma locking is not a noop.
97605 */
97606 if (unlikely(anon_vma_prepare(vma)))
97607 return -ENOMEM;
97608 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
97609 + if (locknext && anon_vma_prepare(vma->vm_next))
97610 + return -ENOMEM;
97611 anon_vma_lock(vma);
97612 + if (locknext)
97613 + anon_vma_lock(vma->vm_next);
97614
97615 /*
97616 * vma->vm_start/vm_end cannot change under us because the caller
97617 * is required to hold the mmap_sem in read mode. We need the
97618 - * anon_vma lock to serialize against concurrent expand_stacks.
97619 - * Also guard against wrapping around to address 0.
97620 + * anon_vma locks to serialize against concurrent expand_stacks
97621 + * and expand_upwards.
97622 */
97623 - if (address < PAGE_ALIGN(address+4))
97624 - address = PAGE_ALIGN(address+4);
97625 - else {
97626 - anon_vma_unlock(vma);
97627 - return -ENOMEM;
97628 - }
97629 error = 0;
97630
97631 /* Somebody else might have raced and expanded it already */
97632 - if (address > vma->vm_end) {
97633 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
97634 + error = -ENOMEM;
97635 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
97636 unsigned long size, grow;
97637
97638 size = address - vma->vm_start;
97639 @@ -1643,6 +1894,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97640 vma->vm_end = address;
97641 }
97642 }
97643 + if (locknext)
97644 + anon_vma_unlock(vma->vm_next);
97645 anon_vma_unlock(vma);
97646 return error;
97647 }
97648 @@ -1655,6 +1908,8 @@ static int expand_downwards(struct vm_area_struct *vma,
97649 unsigned long address)
97650 {
97651 int error;
97652 + bool lockprev = false;
97653 + struct vm_area_struct *prev;
97654
97655 /*
97656 * We must make sure the anon_vma is allocated
97657 @@ -1668,6 +1923,15 @@ static int expand_downwards(struct vm_area_struct *vma,
97658 if (error)
97659 return error;
97660
97661 + prev = vma->vm_prev;
97662 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
97663 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
97664 +#endif
97665 + if (lockprev && anon_vma_prepare(prev))
97666 + return -ENOMEM;
97667 + if (lockprev)
97668 + anon_vma_lock(prev);
97669 +
97670 anon_vma_lock(vma);
97671
97672 /*
97673 @@ -1677,9 +1941,17 @@ static int expand_downwards(struct vm_area_struct *vma,
97674 */
97675
97676 /* Somebody else might have raced and expanded it already */
97677 - if (address < vma->vm_start) {
97678 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
97679 + error = -ENOMEM;
97680 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
97681 unsigned long size, grow;
97682
97683 +#ifdef CONFIG_PAX_SEGMEXEC
97684 + struct vm_area_struct *vma_m;
97685 +
97686 + vma_m = pax_find_mirror_vma(vma);
97687 +#endif
97688 +
97689 size = vma->vm_end - address;
97690 grow = (vma->vm_start - address) >> PAGE_SHIFT;
97691
97692 @@ -1689,10 +1961,22 @@ static int expand_downwards(struct vm_area_struct *vma,
97693 if (!error) {
97694 vma->vm_start = address;
97695 vma->vm_pgoff -= grow;
97696 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
97697 +
97698 +#ifdef CONFIG_PAX_SEGMEXEC
97699 + if (vma_m) {
97700 + vma_m->vm_start -= grow << PAGE_SHIFT;
97701 + vma_m->vm_pgoff -= grow;
97702 + }
97703 +#endif
97704 +
97705 +
97706 }
97707 }
97708 }
97709 anon_vma_unlock(vma);
97710 + if (lockprev)
97711 + anon_vma_unlock(prev);
97712 return error;
97713 }
97714
97715 @@ -1768,6 +2052,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
97716 do {
97717 long nrpages = vma_pages(vma);
97718
97719 +#ifdef CONFIG_PAX_SEGMEXEC
97720 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
97721 + vma = remove_vma(vma);
97722 + continue;
97723 + }
97724 +#endif
97725 +
97726 mm->total_vm -= nrpages;
97727 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
97728 vma = remove_vma(vma);
97729 @@ -1813,6 +2104,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
97730 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
97731 vma->vm_prev = NULL;
97732 do {
97733 +
97734 +#ifdef CONFIG_PAX_SEGMEXEC
97735 + if (vma->vm_mirror) {
97736 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
97737 + vma->vm_mirror->vm_mirror = NULL;
97738 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
97739 + vma->vm_mirror = NULL;
97740 + }
97741 +#endif
97742 +
97743 rb_erase(&vma->vm_rb, &mm->mm_rb);
97744 mm->map_count--;
97745 tail_vma = vma;
97746 @@ -1840,10 +2141,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97747 struct mempolicy *pol;
97748 struct vm_area_struct *new;
97749
97750 +#ifdef CONFIG_PAX_SEGMEXEC
97751 + struct vm_area_struct *vma_m, *new_m = NULL;
97752 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
97753 +#endif
97754 +
97755 if (is_vm_hugetlb_page(vma) && (addr &
97756 ~(huge_page_mask(hstate_vma(vma)))))
97757 return -EINVAL;
97758
97759 +#ifdef CONFIG_PAX_SEGMEXEC
97760 + vma_m = pax_find_mirror_vma(vma);
97761 +
97762 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97763 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
97764 + if (mm->map_count >= sysctl_max_map_count-1)
97765 + return -ENOMEM;
97766 + } else
97767 +#endif
97768 +
97769 if (mm->map_count >= sysctl_max_map_count)
97770 return -ENOMEM;
97771
97772 @@ -1851,6 +2167,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97773 if (!new)
97774 return -ENOMEM;
97775
97776 +#ifdef CONFIG_PAX_SEGMEXEC
97777 + if (vma_m) {
97778 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97779 + if (!new_m) {
97780 + kmem_cache_free(vm_area_cachep, new);
97781 + return -ENOMEM;
97782 + }
97783 + }
97784 +#endif
97785 +
97786 /* most fields are the same, copy all, and then fixup */
97787 *new = *vma;
97788
97789 @@ -1861,8 +2187,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97790 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
97791 }
97792
97793 +#ifdef CONFIG_PAX_SEGMEXEC
97794 + if (vma_m) {
97795 + *new_m = *vma_m;
97796 + new_m->vm_mirror = new;
97797 + new->vm_mirror = new_m;
97798 +
97799 + if (new_below)
97800 + new_m->vm_end = addr_m;
97801 + else {
97802 + new_m->vm_start = addr_m;
97803 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
97804 + }
97805 + }
97806 +#endif
97807 +
97808 pol = mpol_dup(vma_policy(vma));
97809 if (IS_ERR(pol)) {
97810 +
97811 +#ifdef CONFIG_PAX_SEGMEXEC
97812 + if (new_m)
97813 + kmem_cache_free(vm_area_cachep, new_m);
97814 +#endif
97815 +
97816 kmem_cache_free(vm_area_cachep, new);
97817 return PTR_ERR(pol);
97818 }
97819 @@ -1883,6 +2230,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97820 else
97821 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
97822
97823 +#ifdef CONFIG_PAX_SEGMEXEC
97824 + if (vma_m) {
97825 + mpol_get(pol);
97826 + vma_set_policy(new_m, pol);
97827 +
97828 + if (new_m->vm_file) {
97829 + get_file(new_m->vm_file);
97830 + if (vma_m->vm_flags & VM_EXECUTABLE)
97831 + added_exe_file_vma(mm);
97832 + }
97833 +
97834 + if (new_m->vm_ops && new_m->vm_ops->open)
97835 + new_m->vm_ops->open(new_m);
97836 +
97837 + if (new_below)
97838 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
97839 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
97840 + else
97841 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
97842 + }
97843 +#endif
97844 +
97845 return 0;
97846 }
97847
97848 @@ -1891,11 +2260,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97849 * work. This now handles partial unmappings.
97850 * Jeremy Fitzhardinge <jeremy@goop.org>
97851 */
97852 +#ifdef CONFIG_PAX_SEGMEXEC
97853 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97854 {
97855 + int ret = __do_munmap(mm, start, len);
97856 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
97857 + return ret;
97858 +
97859 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
97860 +}
97861 +
97862 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97863 +#else
97864 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97865 +#endif
97866 +{
97867 unsigned long end;
97868 struct vm_area_struct *vma, *prev, *last;
97869
97870 + /*
97871 + * mm->mmap_sem is required to protect against another thread
97872 + * changing the mappings in case we sleep.
97873 + */
97874 + verify_mm_writelocked(mm);
97875 +
97876 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
97877 return -EINVAL;
97878
97879 @@ -1959,6 +2347,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97880 /* Fix up all other VM information */
97881 remove_vma_list(mm, vma);
97882
97883 + track_exec_limit(mm, start, end, 0UL);
97884 +
97885 return 0;
97886 }
97887
97888 @@ -1971,22 +2361,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
97889
97890 profile_munmap(addr);
97891
97892 +#ifdef CONFIG_PAX_SEGMEXEC
97893 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
97894 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
97895 + return -EINVAL;
97896 +#endif
97897 +
97898 down_write(&mm->mmap_sem);
97899 ret = do_munmap(mm, addr, len);
97900 up_write(&mm->mmap_sem);
97901 return ret;
97902 }
97903
97904 -static inline void verify_mm_writelocked(struct mm_struct *mm)
97905 -{
97906 -#ifdef CONFIG_DEBUG_VM
97907 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97908 - WARN_ON(1);
97909 - up_read(&mm->mmap_sem);
97910 - }
97911 -#endif
97912 -}
97913 -
97914 /*
97915 * this is really a simplified "do_mmap". it only handles
97916 * anonymous maps. eventually we may be able to do some
97917 @@ -2000,6 +2386,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97918 struct rb_node ** rb_link, * rb_parent;
97919 pgoff_t pgoff = addr >> PAGE_SHIFT;
97920 int error;
97921 + unsigned long charged;
97922
97923 len = PAGE_ALIGN(len);
97924 if (!len)
97925 @@ -2011,16 +2398,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97926
97927 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
97928
97929 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
97930 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
97931 + flags &= ~VM_EXEC;
97932 +
97933 +#ifdef CONFIG_PAX_MPROTECT
97934 + if (mm->pax_flags & MF_PAX_MPROTECT)
97935 + flags &= ~VM_MAYEXEC;
97936 +#endif
97937 +
97938 + }
97939 +#endif
97940 +
97941 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
97942 if (error & ~PAGE_MASK)
97943 return error;
97944
97945 + charged = len >> PAGE_SHIFT;
97946 +
97947 /*
97948 * mlock MCL_FUTURE?
97949 */
97950 if (mm->def_flags & VM_LOCKED) {
97951 unsigned long locked, lock_limit;
97952 - locked = len >> PAGE_SHIFT;
97953 + locked = charged;
97954 locked += mm->locked_vm;
97955 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
97956 lock_limit >>= PAGE_SHIFT;
97957 @@ -2037,22 +2438,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97958 /*
97959 * Clear old maps. this also does some error checking for us
97960 */
97961 - munmap_back:
97962 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
97963 if (vma && vma->vm_start < addr + len) {
97964 if (do_munmap(mm, addr, len))
97965 return -ENOMEM;
97966 - goto munmap_back;
97967 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
97968 + BUG_ON(vma && vma->vm_start < addr + len);
97969 }
97970
97971 /* Check against address space limits *after* clearing old maps... */
97972 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
97973 + if (!may_expand_vm(mm, charged))
97974 return -ENOMEM;
97975
97976 if (mm->map_count > sysctl_max_map_count)
97977 return -ENOMEM;
97978
97979 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
97980 + if (security_vm_enough_memory(charged))
97981 return -ENOMEM;
97982
97983 /* Can we just expand an old private anonymous mapping? */
97984 @@ -2066,7 +2467,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97985 */
97986 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97987 if (!vma) {
97988 - vm_unacct_memory(len >> PAGE_SHIFT);
97989 + vm_unacct_memory(charged);
97990 return -ENOMEM;
97991 }
97992
97993 @@ -2078,11 +2479,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97994 vma->vm_page_prot = vm_get_page_prot(flags);
97995 vma_link(mm, vma, prev, rb_link, rb_parent);
97996 out:
97997 - mm->total_vm += len >> PAGE_SHIFT;
97998 + mm->total_vm += charged;
97999 if (flags & VM_LOCKED) {
98000 if (!mlock_vma_pages_range(vma, addr, addr + len))
98001 - mm->locked_vm += (len >> PAGE_SHIFT);
98002 + mm->locked_vm += charged;
98003 }
98004 + track_exec_limit(mm, addr, addr + len, flags);
98005 return addr;
98006 }
98007
98008 @@ -2129,8 +2531,10 @@ void exit_mmap(struct mm_struct *mm)
98009 * Walk the list again, actually closing and freeing it,
98010 * with preemption enabled, without holding any MM locks.
98011 */
98012 - while (vma)
98013 + while (vma) {
98014 + vma->vm_mirror = NULL;
98015 vma = remove_vma(vma);
98016 + }
98017
98018 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
98019 }
98020 @@ -2144,6 +2548,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
98021 struct vm_area_struct * __vma, * prev;
98022 struct rb_node ** rb_link, * rb_parent;
98023
98024 +#ifdef CONFIG_PAX_SEGMEXEC
98025 + struct vm_area_struct *vma_m = NULL;
98026 +#endif
98027 +
98028 /*
98029 * The vm_pgoff of a purely anonymous vma should be irrelevant
98030 * until its first write fault, when page's anon_vma and index
98031 @@ -2166,7 +2574,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
98032 if ((vma->vm_flags & VM_ACCOUNT) &&
98033 security_vm_enough_memory_mm(mm, vma_pages(vma)))
98034 return -ENOMEM;
98035 +
98036 +#ifdef CONFIG_PAX_SEGMEXEC
98037 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
98038 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98039 + if (!vma_m)
98040 + return -ENOMEM;
98041 + }
98042 +#endif
98043 +
98044 vma_link(mm, vma, prev, rb_link, rb_parent);
98045 +
98046 +#ifdef CONFIG_PAX_SEGMEXEC
98047 + if (vma_m)
98048 + pax_mirror_vma(vma_m, vma);
98049 +#endif
98050 +
98051 return 0;
98052 }
98053
98054 @@ -2184,6 +2607,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98055 struct rb_node **rb_link, *rb_parent;
98056 struct mempolicy *pol;
98057
98058 + BUG_ON(vma->vm_mirror);
98059 +
98060 /*
98061 * If anonymous vma has not yet been faulted, update new pgoff
98062 * to match new location, to increase its chance of merging.
98063 @@ -2227,6 +2652,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98064 return new_vma;
98065 }
98066
98067 +#ifdef CONFIG_PAX_SEGMEXEC
98068 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
98069 +{
98070 + struct vm_area_struct *prev_m;
98071 + struct rb_node **rb_link_m, *rb_parent_m;
98072 + struct mempolicy *pol_m;
98073 +
98074 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
98075 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
98076 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
98077 + *vma_m = *vma;
98078 + pol_m = vma_policy(vma_m);
98079 + mpol_get(pol_m);
98080 + vma_set_policy(vma_m, pol_m);
98081 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
98082 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
98083 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
98084 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
98085 + if (vma_m->vm_file)
98086 + get_file(vma_m->vm_file);
98087 + if (vma_m->vm_ops && vma_m->vm_ops->open)
98088 + vma_m->vm_ops->open(vma_m);
98089 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
98090 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
98091 + vma_m->vm_mirror = vma;
98092 + vma->vm_mirror = vma_m;
98093 +}
98094 +#endif
98095 +
98096 /*
98097 * Return true if the calling process may expand its vm space by the passed
98098 * number of pages
98099 @@ -2237,7 +2691,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
98100 unsigned long lim;
98101
98102 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
98103 -
98104 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
98105 if (cur + npages > lim)
98106 return 0;
98107 return 1;
98108 @@ -2307,6 +2761,22 @@ int install_special_mapping(struct mm_struct *mm,
98109 vma->vm_start = addr;
98110 vma->vm_end = addr + len;
98111
98112 +#ifdef CONFIG_PAX_MPROTECT
98113 + if (mm->pax_flags & MF_PAX_MPROTECT) {
98114 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
98115 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
98116 + return -EPERM;
98117 + if (!(vm_flags & VM_EXEC))
98118 + vm_flags &= ~VM_MAYEXEC;
98119 +#else
98120 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
98121 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
98122 +#endif
98123 + else
98124 + vm_flags &= ~VM_MAYWRITE;
98125 + }
98126 +#endif
98127 +
98128 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
98129 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
98130
98131 diff --git a/mm/mprotect.c b/mm/mprotect.c
98132 index 1737c7e..c7faeb4 100644
98133 --- a/mm/mprotect.c
98134 +++ b/mm/mprotect.c
98135 @@ -24,10 +24,16 @@
98136 #include <linux/mmu_notifier.h>
98137 #include <linux/migrate.h>
98138 #include <linux/perf_event.h>
98139 +
98140 +#ifdef CONFIG_PAX_MPROTECT
98141 +#include <linux/elf.h>
98142 +#endif
98143 +
98144 #include <asm/uaccess.h>
98145 #include <asm/pgtable.h>
98146 #include <asm/cacheflush.h>
98147 #include <asm/tlbflush.h>
98148 +#include <asm/mmu_context.h>
98149
98150 #ifndef pgprot_modify
98151 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
98152 @@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
98153 flush_tlb_range(vma, start, end);
98154 }
98155
98156 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98157 +/* called while holding the mmap semaphor for writing except stack expansion */
98158 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
98159 +{
98160 + unsigned long oldlimit, newlimit = 0UL;
98161 +
98162 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
98163 + return;
98164 +
98165 + spin_lock(&mm->page_table_lock);
98166 + oldlimit = mm->context.user_cs_limit;
98167 + if ((prot & VM_EXEC) && oldlimit < end)
98168 + /* USER_CS limit moved up */
98169 + newlimit = end;
98170 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
98171 + /* USER_CS limit moved down */
98172 + newlimit = start;
98173 +
98174 + if (newlimit) {
98175 + mm->context.user_cs_limit = newlimit;
98176 +
98177 +#ifdef CONFIG_SMP
98178 + wmb();
98179 + cpus_clear(mm->context.cpu_user_cs_mask);
98180 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
98181 +#endif
98182 +
98183 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
98184 + }
98185 + spin_unlock(&mm->page_table_lock);
98186 + if (newlimit == end) {
98187 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
98188 +
98189 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
98190 + if (is_vm_hugetlb_page(vma))
98191 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
98192 + else
98193 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
98194 + }
98195 +}
98196 +#endif
98197 +
98198 int
98199 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98200 unsigned long start, unsigned long end, unsigned long newflags)
98201 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98202 int error;
98203 int dirty_accountable = 0;
98204
98205 +#ifdef CONFIG_PAX_SEGMEXEC
98206 + struct vm_area_struct *vma_m = NULL;
98207 + unsigned long start_m, end_m;
98208 +
98209 + start_m = start + SEGMEXEC_TASK_SIZE;
98210 + end_m = end + SEGMEXEC_TASK_SIZE;
98211 +#endif
98212 +
98213 if (newflags == oldflags) {
98214 *pprev = vma;
98215 return 0;
98216 }
98217
98218 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
98219 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
98220 +
98221 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
98222 + return -ENOMEM;
98223 +
98224 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
98225 + return -ENOMEM;
98226 + }
98227 +
98228 /*
98229 * If we make a private mapping writable we increase our commit;
98230 * but (without finer accounting) cannot reduce our commit if we
98231 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98232 }
98233 }
98234
98235 +#ifdef CONFIG_PAX_SEGMEXEC
98236 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
98237 + if (start != vma->vm_start) {
98238 + error = split_vma(mm, vma, start, 1);
98239 + if (error)
98240 + goto fail;
98241 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
98242 + *pprev = (*pprev)->vm_next;
98243 + }
98244 +
98245 + if (end != vma->vm_end) {
98246 + error = split_vma(mm, vma, end, 0);
98247 + if (error)
98248 + goto fail;
98249 + }
98250 +
98251 + if (pax_find_mirror_vma(vma)) {
98252 + error = __do_munmap(mm, start_m, end_m - start_m);
98253 + if (error)
98254 + goto fail;
98255 + } else {
98256 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98257 + if (!vma_m) {
98258 + error = -ENOMEM;
98259 + goto fail;
98260 + }
98261 + vma->vm_flags = newflags;
98262 + pax_mirror_vma(vma_m, vma);
98263 + }
98264 + }
98265 +#endif
98266 +
98267 /*
98268 * First try to merge with previous and/or next vma.
98269 */
98270 @@ -195,9 +293,21 @@ success:
98271 * vm_flags and vm_page_prot are protected by the mmap_sem
98272 * held in write mode.
98273 */
98274 +
98275 +#ifdef CONFIG_PAX_SEGMEXEC
98276 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
98277 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
98278 +#endif
98279 +
98280 vma->vm_flags = newflags;
98281 +
98282 +#ifdef CONFIG_PAX_MPROTECT
98283 + if (mm->binfmt && mm->binfmt->handle_mprotect)
98284 + mm->binfmt->handle_mprotect(vma, newflags);
98285 +#endif
98286 +
98287 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
98288 - vm_get_page_prot(newflags));
98289 + vm_get_page_prot(vma->vm_flags));
98290
98291 if (vma_wants_writenotify(vma)) {
98292 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
98293 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98294 end = start + len;
98295 if (end <= start)
98296 return -ENOMEM;
98297 +
98298 +#ifdef CONFIG_PAX_SEGMEXEC
98299 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
98300 + if (end > SEGMEXEC_TASK_SIZE)
98301 + return -EINVAL;
98302 + } else
98303 +#endif
98304 +
98305 + if (end > TASK_SIZE)
98306 + return -EINVAL;
98307 +
98308 if (!arch_validate_prot(prot))
98309 return -EINVAL;
98310
98311 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98312 /*
98313 * Does the application expect PROT_READ to imply PROT_EXEC:
98314 */
98315 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98316 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98317 prot |= PROT_EXEC;
98318
98319 vm_flags = calc_vm_prot_bits(prot);
98320 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98321 if (start > vma->vm_start)
98322 prev = vma;
98323
98324 +#ifdef CONFIG_PAX_MPROTECT
98325 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
98326 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
98327 +#endif
98328 +
98329 for (nstart = start ; ; ) {
98330 unsigned long newflags;
98331
98332 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98333
98334 /* newflags >> 4 shift VM_MAY% in place of VM_% */
98335 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
98336 + if (prot & (PROT_WRITE | PROT_EXEC))
98337 + gr_log_rwxmprotect(vma->vm_file);
98338 +
98339 + error = -EACCES;
98340 + goto out;
98341 + }
98342 +
98343 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
98344 error = -EACCES;
98345 goto out;
98346 }
98347 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98348 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
98349 if (error)
98350 goto out;
98351 +
98352 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
98353 +
98354 nstart = tmp;
98355
98356 if (nstart < prev->vm_end)
98357 diff --git a/mm/mremap.c b/mm/mremap.c
98358 index 3e98d79..1706cec 100644
98359 --- a/mm/mremap.c
98360 +++ b/mm/mremap.c
98361 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
98362 continue;
98363 pte = ptep_clear_flush(vma, old_addr, old_pte);
98364 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
98365 +
98366 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98367 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
98368 + pte = pte_exprotect(pte);
98369 +#endif
98370 +
98371 set_pte_at(mm, new_addr, new_pte, pte);
98372 }
98373
98374 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
98375 if (is_vm_hugetlb_page(vma))
98376 goto Einval;
98377
98378 +#ifdef CONFIG_PAX_SEGMEXEC
98379 + if (pax_find_mirror_vma(vma))
98380 + goto Einval;
98381 +#endif
98382 +
98383 /* We can't remap across vm area boundaries */
98384 if (old_len > vma->vm_end - addr)
98385 goto Efault;
98386 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
98387 unsigned long ret = -EINVAL;
98388 unsigned long charged = 0;
98389 unsigned long map_flags;
98390 + unsigned long pax_task_size = TASK_SIZE;
98391
98392 if (new_addr & ~PAGE_MASK)
98393 goto out;
98394
98395 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
98396 +#ifdef CONFIG_PAX_SEGMEXEC
98397 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
98398 + pax_task_size = SEGMEXEC_TASK_SIZE;
98399 +#endif
98400 +
98401 + pax_task_size -= PAGE_SIZE;
98402 +
98403 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
98404 goto out;
98405
98406 /* Check if the location we're moving into overlaps the
98407 * old location at all, and fail if it does.
98408 */
98409 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
98410 - goto out;
98411 -
98412 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
98413 + if (addr + old_len > new_addr && new_addr + new_len > addr)
98414 goto out;
98415
98416 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
98417 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
98418 struct vm_area_struct *vma;
98419 unsigned long ret = -EINVAL;
98420 unsigned long charged = 0;
98421 + unsigned long pax_task_size = TASK_SIZE;
98422
98423 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
98424 goto out;
98425 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
98426 if (!new_len)
98427 goto out;
98428
98429 +#ifdef CONFIG_PAX_SEGMEXEC
98430 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
98431 + pax_task_size = SEGMEXEC_TASK_SIZE;
98432 +#endif
98433 +
98434 + pax_task_size -= PAGE_SIZE;
98435 +
98436 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
98437 + old_len > pax_task_size || addr > pax_task_size-old_len)
98438 + goto out;
98439 +
98440 if (flags & MREMAP_FIXED) {
98441 if (flags & MREMAP_MAYMOVE)
98442 ret = mremap_to(addr, old_len, new_addr, new_len);
98443 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
98444 addr + new_len);
98445 }
98446 ret = addr;
98447 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
98448 goto out;
98449 }
98450 }
98451 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
98452 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
98453 if (ret)
98454 goto out;
98455 +
98456 + map_flags = vma->vm_flags;
98457 ret = move_vma(vma, addr, old_len, new_len, new_addr);
98458 + if (!(ret & ~PAGE_MASK)) {
98459 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
98460 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
98461 + }
98462 }
98463 out:
98464 if (ret & ~PAGE_MASK)
98465 diff --git a/mm/nommu.c b/mm/nommu.c
98466 index 406e8d4..53970d3 100644
98467 --- a/mm/nommu.c
98468 +++ b/mm/nommu.c
98469 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
98470 int sysctl_overcommit_ratio = 50; /* default is 50% */
98471 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
98472 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
98473 -int heap_stack_gap = 0;
98474
98475 atomic_long_t mmap_pages_allocated;
98476
98477 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
98478 EXPORT_SYMBOL(find_vma);
98479
98480 /*
98481 - * find a VMA
98482 - * - we don't extend stack VMAs under NOMMU conditions
98483 - */
98484 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
98485 -{
98486 - return find_vma(mm, addr);
98487 -}
98488 -
98489 -/*
98490 * expand a stack to a given address
98491 * - not supported under NOMMU conditions
98492 */
98493 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
98494 index 3ecab7e..594a471 100644
98495 --- a/mm/page_alloc.c
98496 +++ b/mm/page_alloc.c
98497 @@ -289,7 +289,7 @@ out:
98498 * This usage means that zero-order pages may not be compound.
98499 */
98500
98501 -static void free_compound_page(struct page *page)
98502 +void free_compound_page(struct page *page)
98503 {
98504 __free_pages_ok(page, compound_order(page));
98505 }
98506 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98507 int bad = 0;
98508 int wasMlocked = __TestClearPageMlocked(page);
98509
98510 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
98511 + unsigned long index = 1UL << order;
98512 +#endif
98513 +
98514 kmemcheck_free_shadow(page, order);
98515
98516 for (i = 0 ; i < (1 << order) ; ++i)
98517 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98518 debug_check_no_obj_freed(page_address(page),
98519 PAGE_SIZE << order);
98520 }
98521 +
98522 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
98523 + for (; index; --index)
98524 + sanitize_highpage(page + index - 1);
98525 +#endif
98526 +
98527 arch_free_page(page, order);
98528 kernel_map_pages(page, 1 << order, 0);
98529
98530 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
98531 arch_alloc_page(page, order);
98532 kernel_map_pages(page, 1 << order, 1);
98533
98534 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
98535 if (gfp_flags & __GFP_ZERO)
98536 prep_zero_page(page, order, gfp_flags);
98537 +#endif
98538
98539 if (order && (gfp_flags & __GFP_COMP))
98540 prep_compound_page(page, order);
98541 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
98542 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
98543 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
98544 }
98545 +
98546 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
98547 + sanitize_highpage(page);
98548 +#endif
98549 +
98550 arch_free_page(page, 0);
98551 kernel_map_pages(page, 1, 0);
98552
98553 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
98554 int cpu;
98555 struct zone *zone;
98556
98557 + pax_track_stack();
98558 +
98559 for_each_populated_zone(zone) {
98560 show_node(zone);
98561 printk("%s per-cpu:\n", zone->name);
98562 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
98563 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
98564 }
98565 #else
98566 -static void inline setup_usemap(struct pglist_data *pgdat,
98567 +static inline void setup_usemap(struct pglist_data *pgdat,
98568 struct zone *zone, unsigned long zonesize) {}
98569 #endif /* CONFIG_SPARSEMEM */
98570
98571 diff --git a/mm/percpu.c b/mm/percpu.c
98572 index c90614a..5f7b7b8 100644
98573 --- a/mm/percpu.c
98574 +++ b/mm/percpu.c
98575 @@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
98576 static unsigned int pcpu_high_unit_cpu __read_mostly;
98577
98578 /* the address of the first chunk which starts with the kernel static area */
98579 -void *pcpu_base_addr __read_mostly;
98580 +void *pcpu_base_addr __read_only;
98581 EXPORT_SYMBOL_GPL(pcpu_base_addr);
98582
98583 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
98584 diff --git a/mm/rmap.c b/mm/rmap.c
98585 index dd43373..d848cd7 100644
98586 --- a/mm/rmap.c
98587 +++ b/mm/rmap.c
98588 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98589 /* page_table_lock to protect against threads */
98590 spin_lock(&mm->page_table_lock);
98591 if (likely(!vma->anon_vma)) {
98592 +
98593 +#ifdef CONFIG_PAX_SEGMEXEC
98594 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
98595 +
98596 + if (vma_m) {
98597 + BUG_ON(vma_m->anon_vma);
98598 + vma_m->anon_vma = anon_vma;
98599 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
98600 + }
98601 +#endif
98602 +
98603 vma->anon_vma = anon_vma;
98604 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
98605 allocated = NULL;
98606 diff --git a/mm/shmem.c b/mm/shmem.c
98607 index 3e0005b..1d659a8 100644
98608 --- a/mm/shmem.c
98609 +++ b/mm/shmem.c
98610 @@ -31,7 +31,7 @@
98611 #include <linux/swap.h>
98612 #include <linux/ima.h>
98613
98614 -static struct vfsmount *shm_mnt;
98615 +struct vfsmount *shm_mnt;
98616
98617 #ifdef CONFIG_SHMEM
98618 /*
98619 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
98620 goto unlock;
98621 }
98622 entry = shmem_swp_entry(info, index, NULL);
98623 + if (!entry)
98624 + goto unlock;
98625 if (entry->val) {
98626 /*
98627 * The more uptodate page coming down from a stacked
98628 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
98629 struct vm_area_struct pvma;
98630 struct page *page;
98631
98632 + pax_track_stack();
98633 +
98634 spol = mpol_cond_copy(&mpol,
98635 mpol_shared_policy_lookup(&info->policy, idx));
98636
98637 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
98638
98639 info = SHMEM_I(inode);
98640 inode->i_size = len-1;
98641 - if (len <= (char *)inode - (char *)info) {
98642 + if (len <= (char *)inode - (char *)info && len <= 64) {
98643 /* do it inline */
98644 memcpy(info, symname, len);
98645 inode->i_op = &shmem_symlink_inline_operations;
98646 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
98647 int err = -ENOMEM;
98648
98649 /* Round up to L1_CACHE_BYTES to resist false sharing */
98650 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
98651 - L1_CACHE_BYTES), GFP_KERNEL);
98652 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
98653 if (!sbinfo)
98654 return -ENOMEM;
98655
98656 diff --git a/mm/slab.c b/mm/slab.c
98657 index c8d466a..909e01e 100644
98658 --- a/mm/slab.c
98659 +++ b/mm/slab.c
98660 @@ -174,7 +174,7 @@
98661
98662 /* Legal flag mask for kmem_cache_create(). */
98663 #if DEBUG
98664 -# define CREATE_MASK (SLAB_RED_ZONE | \
98665 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
98666 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
98667 SLAB_CACHE_DMA | \
98668 SLAB_STORE_USER | \
98669 @@ -182,7 +182,7 @@
98670 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
98671 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
98672 #else
98673 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
98674 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
98675 SLAB_CACHE_DMA | \
98676 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
98677 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
98678 @@ -308,7 +308,7 @@ struct kmem_list3 {
98679 * Need this for bootstrapping a per node allocator.
98680 */
98681 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
98682 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
98683 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
98684 #define CACHE_CACHE 0
98685 #define SIZE_AC MAX_NUMNODES
98686 #define SIZE_L3 (2 * MAX_NUMNODES)
98687 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
98688 if ((x)->max_freeable < i) \
98689 (x)->max_freeable = i; \
98690 } while (0)
98691 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
98692 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
98693 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
98694 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
98695 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
98696 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
98697 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
98698 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
98699 #else
98700 #define STATS_INC_ACTIVE(x) do { } while (0)
98701 #define STATS_DEC_ACTIVE(x) do { } while (0)
98702 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
98703 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
98704 */
98705 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
98706 - const struct slab *slab, void *obj)
98707 + const struct slab *slab, const void *obj)
98708 {
98709 u32 offset = (obj - slab->s_mem);
98710 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
98711 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
98712 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
98713 sizes[INDEX_AC].cs_size,
98714 ARCH_KMALLOC_MINALIGN,
98715 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
98716 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
98717 NULL);
98718
98719 if (INDEX_AC != INDEX_L3) {
98720 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
98721 kmem_cache_create(names[INDEX_L3].name,
98722 sizes[INDEX_L3].cs_size,
98723 ARCH_KMALLOC_MINALIGN,
98724 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
98725 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
98726 NULL);
98727 }
98728
98729 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
98730 sizes->cs_cachep = kmem_cache_create(names->name,
98731 sizes->cs_size,
98732 ARCH_KMALLOC_MINALIGN,
98733 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
98734 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
98735 NULL);
98736 }
98737 #ifdef CONFIG_ZONE_DMA
98738 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
98739 }
98740 /* cpu stats */
98741 {
98742 - unsigned long allochit = atomic_read(&cachep->allochit);
98743 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
98744 - unsigned long freehit = atomic_read(&cachep->freehit);
98745 - unsigned long freemiss = atomic_read(&cachep->freemiss);
98746 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
98747 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
98748 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
98749 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
98750
98751 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
98752 allochit, allocmiss, freehit, freemiss);
98753 @@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
98754
98755 static int __init slab_proc_init(void)
98756 {
98757 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
98758 + mode_t gr_mode = S_IRUGO;
98759 +
98760 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
98761 + gr_mode = S_IRUSR;
98762 +#endif
98763 +
98764 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
98765 #ifdef CONFIG_DEBUG_SLAB_LEAK
98766 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
98767 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
98768 #endif
98769 return 0;
98770 }
98771 module_init(slab_proc_init);
98772 #endif
98773
98774 +void check_object_size(const void *ptr, unsigned long n, bool to)
98775 +{
98776 +
98777 +#ifdef CONFIG_PAX_USERCOPY
98778 + struct page *page;
98779 + struct kmem_cache *cachep = NULL;
98780 + struct slab *slabp;
98781 + unsigned int objnr;
98782 + unsigned long offset;
98783 + const char *type;
98784 +
98785 + if (!n)
98786 + return;
98787 +
98788 + type = "<null>";
98789 + if (ZERO_OR_NULL_PTR(ptr))
98790 + goto report;
98791 +
98792 + if (!virt_addr_valid(ptr))
98793 + return;
98794 +
98795 + page = virt_to_head_page(ptr);
98796 +
98797 + type = "<process stack>";
98798 + if (!PageSlab(page)) {
98799 + if (object_is_on_stack(ptr, n) == -1)
98800 + goto report;
98801 + return;
98802 + }
98803 +
98804 + cachep = page_get_cache(page);
98805 + type = cachep->name;
98806 + if (!(cachep->flags & SLAB_USERCOPY))
98807 + goto report;
98808 +
98809 + slabp = page_get_slab(page);
98810 + objnr = obj_to_index(cachep, slabp, ptr);
98811 + BUG_ON(objnr >= cachep->num);
98812 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
98813 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
98814 + return;
98815 +
98816 +report:
98817 + pax_report_usercopy(ptr, n, to, type);
98818 +#endif
98819 +
98820 +}
98821 +EXPORT_SYMBOL(check_object_size);
98822 +
98823 /**
98824 * ksize - get the actual amount of memory allocated for a given object
98825 * @objp: Pointer to the object
98826 diff --git a/mm/slob.c b/mm/slob.c
98827 index 837ebd6..0bd23bc 100644
98828 --- a/mm/slob.c
98829 +++ b/mm/slob.c
98830 @@ -29,7 +29,7 @@
98831 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
98832 * alloc_pages() directly, allocating compound pages so the page order
98833 * does not have to be separately tracked, and also stores the exact
98834 - * allocation size in page->private so that it can be used to accurately
98835 + * allocation size in slob_page->size so that it can be used to accurately
98836 * provide ksize(). These objects are detected in kfree() because slob_page()
98837 * is false for them.
98838 *
98839 @@ -58,6 +58,7 @@
98840 */
98841
98842 #include <linux/kernel.h>
98843 +#include <linux/sched.h>
98844 #include <linux/slab.h>
98845 #include <linux/mm.h>
98846 #include <linux/swap.h> /* struct reclaim_state */
98847 @@ -100,7 +101,8 @@ struct slob_page {
98848 unsigned long flags; /* mandatory */
98849 atomic_t _count; /* mandatory */
98850 slobidx_t units; /* free units left in page */
98851 - unsigned long pad[2];
98852 + unsigned long pad[1];
98853 + unsigned long size; /* size when >=PAGE_SIZE */
98854 slob_t *free; /* first free slob_t in page */
98855 struct list_head list; /* linked list of free pages */
98856 };
98857 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
98858 */
98859 static inline int is_slob_page(struct slob_page *sp)
98860 {
98861 - return PageSlab((struct page *)sp);
98862 + return PageSlab((struct page *)sp) && !sp->size;
98863 }
98864
98865 static inline void set_slob_page(struct slob_page *sp)
98866 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
98867
98868 static inline struct slob_page *slob_page(const void *addr)
98869 {
98870 - return (struct slob_page *)virt_to_page(addr);
98871 + return (struct slob_page *)virt_to_head_page(addr);
98872 }
98873
98874 /*
98875 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
98876 /*
98877 * Return the size of a slob block.
98878 */
98879 -static slobidx_t slob_units(slob_t *s)
98880 +static slobidx_t slob_units(const slob_t *s)
98881 {
98882 if (s->units > 0)
98883 return s->units;
98884 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
98885 /*
98886 * Return the next free slob block pointer after this one.
98887 */
98888 -static slob_t *slob_next(slob_t *s)
98889 +static slob_t *slob_next(const slob_t *s)
98890 {
98891 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
98892 slobidx_t next;
98893 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
98894 /*
98895 * Returns true if s is the last free block in its page.
98896 */
98897 -static int slob_last(slob_t *s)
98898 +static int slob_last(const slob_t *s)
98899 {
98900 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
98901 }
98902 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
98903 if (!page)
98904 return NULL;
98905
98906 + set_slob_page(page);
98907 return page_address(page);
98908 }
98909
98910 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
98911 if (!b)
98912 return NULL;
98913 sp = slob_page(b);
98914 - set_slob_page(sp);
98915
98916 spin_lock_irqsave(&slob_lock, flags);
98917 sp->units = SLOB_UNITS(PAGE_SIZE);
98918 sp->free = b;
98919 + sp->size = 0;
98920 INIT_LIST_HEAD(&sp->list);
98921 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
98922 set_slob_page_free(sp, slob_list);
98923 @@ -475,10 +478,9 @@ out:
98924 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
98925 #endif
98926
98927 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
98928 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
98929 {
98930 - unsigned int *m;
98931 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
98932 + slob_t *m;
98933 void *ret;
98934
98935 lockdep_trace_alloc(gfp);
98936 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
98937
98938 if (!m)
98939 return NULL;
98940 - *m = size;
98941 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
98942 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
98943 + m[0].units = size;
98944 + m[1].units = align;
98945 ret = (void *)m + align;
98946
98947 trace_kmalloc_node(_RET_IP_, ret,
98948 @@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
98949
98950 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
98951 if (ret) {
98952 - struct page *page;
98953 - page = virt_to_page(ret);
98954 - page->private = size;
98955 + struct slob_page *sp;
98956 + sp = slob_page(ret);
98957 + sp->size = size;
98958 }
98959
98960 trace_kmalloc_node(_RET_IP_, ret,
98961 size, PAGE_SIZE << order, gfp, node);
98962 }
98963
98964 - kmemleak_alloc(ret, size, 1, gfp);
98965 + return ret;
98966 +}
98967 +
98968 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
98969 +{
98970 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
98971 + void *ret = __kmalloc_node_align(size, gfp, node, align);
98972 +
98973 + if (!ZERO_OR_NULL_PTR(ret))
98974 + kmemleak_alloc(ret, size, 1, gfp);
98975 return ret;
98976 }
98977 EXPORT_SYMBOL(__kmalloc_node);
98978 @@ -528,13 +542,92 @@ void kfree(const void *block)
98979 sp = slob_page(block);
98980 if (is_slob_page(sp)) {
98981 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
98982 - unsigned int *m = (unsigned int *)(block - align);
98983 - slob_free(m, *m + align);
98984 - } else
98985 + slob_t *m = (slob_t *)(block - align);
98986 + slob_free(m, m[0].units + align);
98987 + } else {
98988 + clear_slob_page(sp);
98989 + free_slob_page(sp);
98990 + sp->size = 0;
98991 put_page(&sp->page);
98992 + }
98993 }
98994 EXPORT_SYMBOL(kfree);
98995
98996 +void check_object_size(const void *ptr, unsigned long n, bool to)
98997 +{
98998 +
98999 +#ifdef CONFIG_PAX_USERCOPY
99000 + struct slob_page *sp;
99001 + const slob_t *free;
99002 + const void *base;
99003 + unsigned long flags;
99004 + const char *type;
99005 +
99006 + if (!n)
99007 + return;
99008 +
99009 + type = "<null>";
99010 + if (ZERO_OR_NULL_PTR(ptr))
99011 + goto report;
99012 +
99013 + if (!virt_addr_valid(ptr))
99014 + return;
99015 +
99016 + type = "<process stack>";
99017 + sp = slob_page(ptr);
99018 + if (!PageSlab((struct page *)sp)) {
99019 + if (object_is_on_stack(ptr, n) == -1)
99020 + goto report;
99021 + return;
99022 + }
99023 +
99024 + type = "<slob>";
99025 + if (sp->size) {
99026 + base = page_address(&sp->page);
99027 + if (base <= ptr && n <= sp->size - (ptr - base))
99028 + return;
99029 + goto report;
99030 + }
99031 +
99032 + /* some tricky double walking to find the chunk */
99033 + spin_lock_irqsave(&slob_lock, flags);
99034 + base = (void *)((unsigned long)ptr & PAGE_MASK);
99035 + free = sp->free;
99036 +
99037 + while (!slob_last(free) && (void *)free <= ptr) {
99038 + base = free + slob_units(free);
99039 + free = slob_next(free);
99040 + }
99041 +
99042 + while (base < (void *)free) {
99043 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
99044 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
99045 + int offset;
99046 +
99047 + if (ptr < base + align)
99048 + break;
99049 +
99050 + offset = ptr - base - align;
99051 + if (offset >= m) {
99052 + base += size;
99053 + continue;
99054 + }
99055 +
99056 + if (n > m - offset)
99057 + break;
99058 +
99059 + spin_unlock_irqrestore(&slob_lock, flags);
99060 + return;
99061 + }
99062 +
99063 + spin_unlock_irqrestore(&slob_lock, flags);
99064 +report:
99065 + pax_report_usercopy(ptr, n, to, type);
99066 +#endif
99067 +
99068 +}
99069 +EXPORT_SYMBOL(check_object_size);
99070 +
99071 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
99072 size_t ksize(const void *block)
99073 {
99074 @@ -547,10 +640,10 @@ size_t ksize(const void *block)
99075 sp = slob_page(block);
99076 if (is_slob_page(sp)) {
99077 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99078 - unsigned int *m = (unsigned int *)(block - align);
99079 - return SLOB_UNITS(*m) * SLOB_UNIT;
99080 + slob_t *m = (slob_t *)(block - align);
99081 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
99082 } else
99083 - return sp->page.private;
99084 + return sp->size;
99085 }
99086 EXPORT_SYMBOL(ksize);
99087
99088 @@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
99089 {
99090 struct kmem_cache *c;
99091
99092 +#ifdef CONFIG_PAX_USERCOPY
99093 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
99094 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
99095 +#else
99096 c = slob_alloc(sizeof(struct kmem_cache),
99097 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
99098 +#endif
99099
99100 if (c) {
99101 c->name = name;
99102 @@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
99103 {
99104 void *b;
99105
99106 +#ifdef CONFIG_PAX_USERCOPY
99107 + b = __kmalloc_node_align(c->size, flags, node, c->align);
99108 +#else
99109 if (c->size < PAGE_SIZE) {
99110 b = slob_alloc(c->size, flags, c->align, node);
99111 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
99112 SLOB_UNITS(c->size) * SLOB_UNIT,
99113 flags, node);
99114 } else {
99115 + struct slob_page *sp;
99116 +
99117 b = slob_new_pages(flags, get_order(c->size), node);
99118 + sp = slob_page(b);
99119 + sp->size = c->size;
99120 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
99121 PAGE_SIZE << get_order(c->size),
99122 flags, node);
99123 }
99124 +#endif
99125
99126 if (c->ctor)
99127 c->ctor(b);
99128 @@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
99129
99130 static void __kmem_cache_free(void *b, int size)
99131 {
99132 - if (size < PAGE_SIZE)
99133 + struct slob_page *sp = slob_page(b);
99134 +
99135 + if (is_slob_page(sp))
99136 slob_free(b, size);
99137 - else
99138 + else {
99139 + clear_slob_page(sp);
99140 + free_slob_page(sp);
99141 + sp->size = 0;
99142 slob_free_pages(b, get_order(size));
99143 + }
99144 }
99145
99146 static void kmem_rcu_free(struct rcu_head *head)
99147 @@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
99148
99149 void kmem_cache_free(struct kmem_cache *c, void *b)
99150 {
99151 + int size = c->size;
99152 +
99153 +#ifdef CONFIG_PAX_USERCOPY
99154 + if (size + c->align < PAGE_SIZE) {
99155 + size += c->align;
99156 + b -= c->align;
99157 + }
99158 +#endif
99159 +
99160 kmemleak_free_recursive(b, c->flags);
99161 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
99162 struct slob_rcu *slob_rcu;
99163 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
99164 + slob_rcu = b + (size - sizeof(struct slob_rcu));
99165 INIT_RCU_HEAD(&slob_rcu->head);
99166 - slob_rcu->size = c->size;
99167 + slob_rcu->size = size;
99168 call_rcu(&slob_rcu->head, kmem_rcu_free);
99169 } else {
99170 - __kmem_cache_free(b, c->size);
99171 + __kmem_cache_free(b, size);
99172 }
99173
99174 +#ifdef CONFIG_PAX_USERCOPY
99175 + trace_kfree(_RET_IP_, b);
99176 +#else
99177 trace_kmem_cache_free(_RET_IP_, b);
99178 +#endif
99179 +
99180 }
99181 EXPORT_SYMBOL(kmem_cache_free);
99182
99183 diff --git a/mm/slub.c b/mm/slub.c
99184 index 4996fc7..87e01d0 100644
99185 --- a/mm/slub.c
99186 +++ b/mm/slub.c
99187 @@ -201,7 +201,7 @@ struct track {
99188
99189 enum track_item { TRACK_ALLOC, TRACK_FREE };
99190
99191 -#ifdef CONFIG_SLUB_DEBUG
99192 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99193 static int sysfs_slab_add(struct kmem_cache *);
99194 static int sysfs_slab_alias(struct kmem_cache *, const char *);
99195 static void sysfs_slab_remove(struct kmem_cache *);
99196 @@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
99197 if (!t->addr)
99198 return;
99199
99200 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
99201 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
99202 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
99203 }
99204
99205 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
99206
99207 page = virt_to_head_page(x);
99208
99209 + BUG_ON(!PageSlab(page));
99210 +
99211 slab_free(s, page, x, _RET_IP_);
99212
99213 trace_kmem_cache_free(_RET_IP_, x);
99214 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
99215 * Merge control. If this is set then no merging of slab caches will occur.
99216 * (Could be removed. This was introduced to pacify the merge skeptics.)
99217 */
99218 -static int slub_nomerge;
99219 +static int slub_nomerge = 1;
99220
99221 /*
99222 * Calculate the order of allocation given an slab object size.
99223 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
99224 * list to avoid pounding the page allocator excessively.
99225 */
99226 set_min_partial(s, ilog2(s->size));
99227 - s->refcount = 1;
99228 + atomic_set(&s->refcount, 1);
99229 #ifdef CONFIG_NUMA
99230 s->remote_node_defrag_ratio = 1000;
99231 #endif
99232 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
99233 void kmem_cache_destroy(struct kmem_cache *s)
99234 {
99235 down_write(&slub_lock);
99236 - s->refcount--;
99237 - if (!s->refcount) {
99238 + if (atomic_dec_and_test(&s->refcount)) {
99239 list_del(&s->list);
99240 up_write(&slub_lock);
99241 if (kmem_cache_close(s)) {
99242 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
99243 __setup("slub_nomerge", setup_slub_nomerge);
99244
99245 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
99246 - const char *name, int size, gfp_t gfp_flags)
99247 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
99248 {
99249 - unsigned int flags = 0;
99250 -
99251 if (gfp_flags & SLUB_DMA)
99252 - flags = SLAB_CACHE_DMA;
99253 + flags |= SLAB_CACHE_DMA;
99254
99255 /*
99256 * This function is called with IRQs disabled during early-boot on
99257 @@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
99258 EXPORT_SYMBOL(__kmalloc_node);
99259 #endif
99260
99261 +void check_object_size(const void *ptr, unsigned long n, bool to)
99262 +{
99263 +
99264 +#ifdef CONFIG_PAX_USERCOPY
99265 + struct page *page;
99266 + struct kmem_cache *s = NULL;
99267 + unsigned long offset;
99268 + const char *type;
99269 +
99270 + if (!n)
99271 + return;
99272 +
99273 + type = "<null>";
99274 + if (ZERO_OR_NULL_PTR(ptr))
99275 + goto report;
99276 +
99277 + if (!virt_addr_valid(ptr))
99278 + return;
99279 +
99280 + page = get_object_page(ptr);
99281 +
99282 + type = "<process stack>";
99283 + if (!page) {
99284 + if (object_is_on_stack(ptr, n) == -1)
99285 + goto report;
99286 + return;
99287 + }
99288 +
99289 + s = page->slab;
99290 + type = s->name;
99291 + if (!(s->flags & SLAB_USERCOPY))
99292 + goto report;
99293 +
99294 + offset = (ptr - page_address(page)) % s->size;
99295 + if (offset <= s->objsize && n <= s->objsize - offset)
99296 + return;
99297 +
99298 +report:
99299 + pax_report_usercopy(ptr, n, to, type);
99300 +#endif
99301 +
99302 +}
99303 +EXPORT_SYMBOL(check_object_size);
99304 +
99305 size_t ksize(const void *object)
99306 {
99307 struct page *page;
99308 @@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
99309 * kmem_cache_open for slab_state == DOWN.
99310 */
99311 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
99312 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
99313 - kmalloc_caches[0].refcount = -1;
99314 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
99315 + atomic_set(&kmalloc_caches[0].refcount, -1);
99316 caches++;
99317
99318 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
99319 @@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
99320 /* Caches that are not of the two-to-the-power-of size */
99321 if (KMALLOC_MIN_SIZE <= 32) {
99322 create_kmalloc_cache(&kmalloc_caches[1],
99323 - "kmalloc-96", 96, GFP_NOWAIT);
99324 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
99325 caches++;
99326 }
99327 if (KMALLOC_MIN_SIZE <= 64) {
99328 create_kmalloc_cache(&kmalloc_caches[2],
99329 - "kmalloc-192", 192, GFP_NOWAIT);
99330 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
99331 caches++;
99332 }
99333
99334 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
99335 create_kmalloc_cache(&kmalloc_caches[i],
99336 - "kmalloc", 1 << i, GFP_NOWAIT);
99337 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
99338 caches++;
99339 }
99340
99341 @@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
99342 /*
99343 * We may have set a slab to be unmergeable during bootstrap.
99344 */
99345 - if (s->refcount < 0)
99346 + if (atomic_read(&s->refcount) < 0)
99347 return 1;
99348
99349 return 0;
99350 @@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
99351 if (s) {
99352 int cpu;
99353
99354 - s->refcount++;
99355 + atomic_inc(&s->refcount);
99356 /*
99357 * Adjust the object sizes so that we clear
99358 * the complete object on kzalloc.
99359 @@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
99360
99361 if (sysfs_slab_alias(s, name)) {
99362 down_write(&slub_lock);
99363 - s->refcount--;
99364 + atomic_dec(&s->refcount);
99365 up_write(&slub_lock);
99366 goto err;
99367 }
99368 @@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
99369
99370 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
99371 {
99372 - return sprintf(buf, "%d\n", s->refcount - 1);
99373 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
99374 }
99375 SLAB_ATTR_RO(aliases);
99376
99377 @@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
99378 kfree(s);
99379 }
99380
99381 -static struct sysfs_ops slab_sysfs_ops = {
99382 +static const struct sysfs_ops slab_sysfs_ops = {
99383 .show = slab_attr_show,
99384 .store = slab_attr_store,
99385 };
99386 @@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
99387 return 0;
99388 }
99389
99390 -static struct kset_uevent_ops slab_uevent_ops = {
99391 +static const struct kset_uevent_ops slab_uevent_ops = {
99392 .filter = uevent_filter,
99393 };
99394
99395 @@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
99396 return name;
99397 }
99398
99399 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99400 static int sysfs_slab_add(struct kmem_cache *s)
99401 {
99402 int err;
99403 @@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
99404 kobject_del(&s->kobj);
99405 kobject_put(&s->kobj);
99406 }
99407 +#endif
99408
99409 /*
99410 * Need to buffer aliases during bootup until sysfs becomes
99411 @@ -4632,6 +4677,7 @@ struct saved_alias {
99412
99413 static struct saved_alias *alias_list;
99414
99415 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99416 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99417 {
99418 struct saved_alias *al;
99419 @@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99420 alias_list = al;
99421 return 0;
99422 }
99423 +#endif
99424
99425 static int __init slab_sysfs_init(void)
99426 {
99427 @@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
99428
99429 static int __init slab_proc_init(void)
99430 {
99431 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
99432 + mode_t gr_mode = S_IRUGO;
99433 +
99434 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
99435 + gr_mode = S_IRUSR;
99436 +#endif
99437 +
99438 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
99439 return 0;
99440 }
99441 module_init(slab_proc_init);
99442 diff --git a/mm/swap.c b/mm/swap.c
99443 index 308e57d..5de19c0 100644
99444 --- a/mm/swap.c
99445 +++ b/mm/swap.c
99446 @@ -30,6 +30,7 @@
99447 #include <linux/notifier.h>
99448 #include <linux/backing-dev.h>
99449 #include <linux/memcontrol.h>
99450 +#include <linux/hugetlb.h>
99451
99452 #include "internal.h"
99453
99454 @@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
99455 compound_page_dtor *dtor;
99456
99457 dtor = get_compound_page_dtor(page);
99458 + if (!PageHuge(page))
99459 + BUG_ON(dtor != free_compound_page);
99460 (*dtor)(page);
99461 }
99462 }
99463 diff --git a/mm/util.c b/mm/util.c
99464 index e48b493..24a601d 100644
99465 --- a/mm/util.c
99466 +++ b/mm/util.c
99467 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
99468 void arch_pick_mmap_layout(struct mm_struct *mm)
99469 {
99470 mm->mmap_base = TASK_UNMAPPED_BASE;
99471 +
99472 +#ifdef CONFIG_PAX_RANDMMAP
99473 + if (mm->pax_flags & MF_PAX_RANDMMAP)
99474 + mm->mmap_base += mm->delta_mmap;
99475 +#endif
99476 +
99477 mm->get_unmapped_area = arch_get_unmapped_area;
99478 mm->unmap_area = arch_unmap_area;
99479 }
99480 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
99481 index f34ffd0..90d7407 100644
99482 --- a/mm/vmalloc.c
99483 +++ b/mm/vmalloc.c
99484 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
99485
99486 pte = pte_offset_kernel(pmd, addr);
99487 do {
99488 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
99489 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
99490 +
99491 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
99492 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
99493 + BUG_ON(!pte_exec(*pte));
99494 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
99495 + continue;
99496 + }
99497 +#endif
99498 +
99499 + {
99500 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
99501 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
99502 + }
99503 } while (pte++, addr += PAGE_SIZE, addr != end);
99504 }
99505
99506 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
99507 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
99508 {
99509 pte_t *pte;
99510 + int ret = -ENOMEM;
99511
99512 /*
99513 * nr is a running index into the array which helps higher level
99514 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
99515 pte = pte_alloc_kernel(pmd, addr);
99516 if (!pte)
99517 return -ENOMEM;
99518 +
99519 + pax_open_kernel();
99520 do {
99521 struct page *page = pages[*nr];
99522
99523 - if (WARN_ON(!pte_none(*pte)))
99524 - return -EBUSY;
99525 - if (WARN_ON(!page))
99526 - return -ENOMEM;
99527 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
99528 + if (!(pgprot_val(prot) & _PAGE_NX))
99529 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
99530 + else
99531 +#endif
99532 +
99533 + if (WARN_ON(!pte_none(*pte))) {
99534 + ret = -EBUSY;
99535 + goto out;
99536 + }
99537 + if (WARN_ON(!page)) {
99538 + ret = -ENOMEM;
99539 + goto out;
99540 + }
99541 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
99542 (*nr)++;
99543 } while (pte++, addr += PAGE_SIZE, addr != end);
99544 - return 0;
99545 + ret = 0;
99546 +out:
99547 + pax_close_kernel();
99548 + return ret;
99549 }
99550
99551 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
99552 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
99553 * and fall back on vmalloc() if that fails. Others
99554 * just put it in the vmalloc space.
99555 */
99556 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
99557 +#ifdef CONFIG_MODULES
99558 +#ifdef MODULES_VADDR
99559 unsigned long addr = (unsigned long)x;
99560 if (addr >= MODULES_VADDR && addr < MODULES_END)
99561 return 1;
99562 #endif
99563 +
99564 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
99565 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
99566 + return 1;
99567 +#endif
99568 +
99569 +#endif
99570 +
99571 return is_vmalloc_addr(x);
99572 }
99573
99574 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
99575
99576 if (!pgd_none(*pgd)) {
99577 pud_t *pud = pud_offset(pgd, addr);
99578 +#ifdef CONFIG_X86
99579 + if (!pud_large(*pud))
99580 +#endif
99581 if (!pud_none(*pud)) {
99582 pmd_t *pmd = pmd_offset(pud, addr);
99583 +#ifdef CONFIG_X86
99584 + if (!pmd_large(*pmd))
99585 +#endif
99586 if (!pmd_none(*pmd)) {
99587 pte_t *ptep, pte;
99588
99589 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
99590 struct rb_node *tmp;
99591
99592 while (*p) {
99593 - struct vmap_area *tmp;
99594 + struct vmap_area *varea;
99595
99596 parent = *p;
99597 - tmp = rb_entry(parent, struct vmap_area, rb_node);
99598 - if (va->va_start < tmp->va_end)
99599 + varea = rb_entry(parent, struct vmap_area, rb_node);
99600 + if (va->va_start < varea->va_end)
99601 p = &(*p)->rb_left;
99602 - else if (va->va_end > tmp->va_start)
99603 + else if (va->va_end > varea->va_start)
99604 p = &(*p)->rb_right;
99605 else
99606 BUG();
99607 @@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
99608 struct vm_struct *area;
99609
99610 BUG_ON(in_interrupt());
99611 +
99612 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
99613 + if (flags & VM_KERNEXEC) {
99614 + if (start != VMALLOC_START || end != VMALLOC_END)
99615 + return NULL;
99616 + start = (unsigned long)MODULES_EXEC_VADDR;
99617 + end = (unsigned long)MODULES_EXEC_END;
99618 + }
99619 +#endif
99620 +
99621 if (flags & VM_IOREMAP) {
99622 int bit = fls(size);
99623
99624 @@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
99625 if (count > totalram_pages)
99626 return NULL;
99627
99628 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
99629 + if (!(pgprot_val(prot) & _PAGE_NX))
99630 + flags |= VM_KERNEXEC;
99631 +#endif
99632 +
99633 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
99634 __builtin_return_address(0));
99635 if (!area)
99636 @@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
99637 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
99638 return NULL;
99639
99640 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
99641 + if (!(pgprot_val(prot) & _PAGE_NX))
99642 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
99643 + VMALLOC_START, VMALLOC_END, node,
99644 + gfp_mask, caller);
99645 + else
99646 +#endif
99647 +
99648 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
99649 VMALLOC_START, VMALLOC_END, node,
99650 gfp_mask, caller);
99651 @@ -1698,10 +1763,9 @@ EXPORT_SYMBOL(vmalloc_node);
99652 * For tight control over page level allocator and protection flags
99653 * use __vmalloc() instead.
99654 */
99655 -
99656 void *vmalloc_exec(unsigned long size)
99657 {
99658 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
99659 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
99660 -1, __builtin_return_address(0));
99661 }
99662
99663 @@ -1998,6 +2062,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
99664 unsigned long uaddr = vma->vm_start;
99665 unsigned long usize = vma->vm_end - vma->vm_start;
99666
99667 + BUG_ON(vma->vm_mirror);
99668 +
99669 if ((PAGE_SIZE-1) & (unsigned long)addr)
99670 return -EINVAL;
99671
99672 diff --git a/mm/vmstat.c b/mm/vmstat.c
99673 index 42d76c6..5643dc4 100644
99674 --- a/mm/vmstat.c
99675 +++ b/mm/vmstat.c
99676 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
99677 *
99678 * vm_stat contains the global counters
99679 */
99680 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
99681 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
99682 EXPORT_SYMBOL(vm_stat);
99683
99684 #ifdef CONFIG_SMP
99685 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
99686 v = p->vm_stat_diff[i];
99687 p->vm_stat_diff[i] = 0;
99688 local_irq_restore(flags);
99689 - atomic_long_add(v, &zone->vm_stat[i]);
99690 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
99691 global_diff[i] += v;
99692 #ifdef CONFIG_NUMA
99693 /* 3 seconds idle till flush */
99694 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
99695
99696 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
99697 if (global_diff[i])
99698 - atomic_long_add(global_diff[i], &vm_stat[i]);
99699 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
99700 }
99701
99702 #endif
99703 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
99704 start_cpu_timer(cpu);
99705 #endif
99706 #ifdef CONFIG_PROC_FS
99707 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
99708 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
99709 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
99710 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
99711 + {
99712 + mode_t gr_mode = S_IRUGO;
99713 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
99714 + gr_mode = S_IRUSR;
99715 +#endif
99716 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
99717 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
99718 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
99719 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
99720 +#else
99721 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
99722 +#endif
99723 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
99724 + }
99725 #endif
99726 return 0;
99727 }
99728 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
99729 index a29c5ab..6143f20 100644
99730 --- a/net/8021q/vlan.c
99731 +++ b/net/8021q/vlan.c
99732 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
99733 err = -EPERM;
99734 if (!capable(CAP_NET_ADMIN))
99735 break;
99736 - if ((args.u.name_type >= 0) &&
99737 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
99738 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
99739 struct vlan_net *vn;
99740
99741 vn = net_generic(net, vlan_net_id);
99742 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
99743 index a2d2984..f9eb711 100644
99744 --- a/net/9p/trans_fd.c
99745 +++ b/net/9p/trans_fd.c
99746 @@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
99747 oldfs = get_fs();
99748 set_fs(get_ds());
99749 /* The cast to a user pointer is valid due to the set_fs() */
99750 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
99751 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
99752 set_fs(oldfs);
99753
99754 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
99755 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
99756 index 02cc7e7..4514f1b 100644
99757 --- a/net/atm/atm_misc.c
99758 +++ b/net/atm/atm_misc.c
99759 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
99760 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
99761 return 1;
99762 atm_return(vcc,truesize);
99763 - atomic_inc(&vcc->stats->rx_drop);
99764 + atomic_inc_unchecked(&vcc->stats->rx_drop);
99765 return 0;
99766 }
99767
99768 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
99769 }
99770 }
99771 atm_return(vcc,guess);
99772 - atomic_inc(&vcc->stats->rx_drop);
99773 + atomic_inc_unchecked(&vcc->stats->rx_drop);
99774 return NULL;
99775 }
99776
99777 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
99778
99779 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
99780 {
99781 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
99782 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
99783 __SONET_ITEMS
99784 #undef __HANDLE_ITEM
99785 }
99786 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
99787
99788 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
99789 {
99790 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
99791 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
99792 __SONET_ITEMS
99793 #undef __HANDLE_ITEM
99794 }
99795 diff --git a/net/atm/lec.h b/net/atm/lec.h
99796 index 9d14d19..5c145f3 100644
99797 --- a/net/atm/lec.h
99798 +++ b/net/atm/lec.h
99799 @@ -48,7 +48,7 @@ struct lane2_ops {
99800 const u8 *tlvs, u32 sizeoftlvs);
99801 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
99802 const u8 *tlvs, u32 sizeoftlvs);
99803 -};
99804 +} __no_const;
99805
99806 /*
99807 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
99808 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
99809 index 0919a88..a23d54e 100644
99810 --- a/net/atm/mpc.h
99811 +++ b/net/atm/mpc.h
99812 @@ -33,7 +33,7 @@ struct mpoa_client {
99813 struct mpc_parameters parameters; /* parameters for this client */
99814
99815 const struct net_device_ops *old_ops;
99816 - struct net_device_ops new_ops;
99817 + net_device_ops_no_const new_ops;
99818 };
99819
99820
99821 diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
99822 index 4504a4b..1733f1e 100644
99823 --- a/net/atm/mpoa_caches.c
99824 +++ b/net/atm/mpoa_caches.c
99825 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
99826 struct timeval now;
99827 struct k_message msg;
99828
99829 + pax_track_stack();
99830 +
99831 do_gettimeofday(&now);
99832
99833 write_lock_irq(&client->egress_lock);
99834 diff --git a/net/atm/proc.c b/net/atm/proc.c
99835 index ab8419a..aa91497 100644
99836 --- a/net/atm/proc.c
99837 +++ b/net/atm/proc.c
99838 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
99839 const struct k_atm_aal_stats *stats)
99840 {
99841 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
99842 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
99843 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
99844 - atomic_read(&stats->rx_drop));
99845 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
99846 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
99847 + atomic_read_unchecked(&stats->rx_drop));
99848 }
99849
99850 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
99851 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
99852 {
99853 struct sock *sk = sk_atm(vcc);
99854
99855 +#ifdef CONFIG_GRKERNSEC_HIDESYM
99856 + seq_printf(seq, "%p ", NULL);
99857 +#else
99858 seq_printf(seq, "%p ", vcc);
99859 +#endif
99860 +
99861 if (!vcc->dev)
99862 seq_printf(seq, "Unassigned ");
99863 else
99864 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
99865 {
99866 if (!vcc->dev)
99867 seq_printf(seq, sizeof(void *) == 4 ?
99868 +#ifdef CONFIG_GRKERNSEC_HIDESYM
99869 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
99870 +#else
99871 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
99872 +#endif
99873 else
99874 seq_printf(seq, "%3d %3d %5d ",
99875 vcc->dev->number, vcc->vpi, vcc->vci);
99876 diff --git a/net/atm/resources.c b/net/atm/resources.c
99877 index 56b7322..c48b84e 100644
99878 --- a/net/atm/resources.c
99879 +++ b/net/atm/resources.c
99880 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
99881 static void copy_aal_stats(struct k_atm_aal_stats *from,
99882 struct atm_aal_stats *to)
99883 {
99884 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
99885 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
99886 __AAL_STAT_ITEMS
99887 #undef __HANDLE_ITEM
99888 }
99889 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
99890 static void subtract_aal_stats(struct k_atm_aal_stats *from,
99891 struct atm_aal_stats *to)
99892 {
99893 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
99894 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
99895 __AAL_STAT_ITEMS
99896 #undef __HANDLE_ITEM
99897 }
99898 diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
99899 index 8567d47..bba2292 100644
99900 --- a/net/bridge/br_private.h
99901 +++ b/net/bridge/br_private.h
99902 @@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
99903
99904 #ifdef CONFIG_SYSFS
99905 /* br_sysfs_if.c */
99906 -extern struct sysfs_ops brport_sysfs_ops;
99907 +extern const struct sysfs_ops brport_sysfs_ops;
99908 extern int br_sysfs_addif(struct net_bridge_port *p);
99909
99910 /* br_sysfs_br.c */
99911 diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
99912 index 9a52ac5..c97538e 100644
99913 --- a/net/bridge/br_stp_if.c
99914 +++ b/net/bridge/br_stp_if.c
99915 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
99916 char *envp[] = { NULL };
99917
99918 if (br->stp_enabled == BR_USER_STP) {
99919 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
99920 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
99921 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
99922 br->dev->name, r);
99923
99924 diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
99925 index 820643a..ce77fb3 100644
99926 --- a/net/bridge/br_sysfs_if.c
99927 +++ b/net/bridge/br_sysfs_if.c
99928 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
99929 return ret;
99930 }
99931
99932 -struct sysfs_ops brport_sysfs_ops = {
99933 +const struct sysfs_ops brport_sysfs_ops = {
99934 .show = brport_show,
99935 .store = brport_store,
99936 };
99937 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
99938 index d73d47f..72df42a 100644
99939 --- a/net/bridge/netfilter/ebtables.c
99940 +++ b/net/bridge/netfilter/ebtables.c
99941 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
99942 unsigned int entries_size, nentries;
99943 char *entries;
99944
99945 + pax_track_stack();
99946 +
99947 if (cmd == EBT_SO_GET_ENTRIES) {
99948 entries_size = t->private->entries_size;
99949 nentries = t->private->nentries;
99950 diff --git a/net/can/bcm.c b/net/can/bcm.c
99951 index 2ffd2e0..72a7486 100644
99952 --- a/net/can/bcm.c
99953 +++ b/net/can/bcm.c
99954 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
99955 struct bcm_sock *bo = bcm_sk(sk);
99956 struct bcm_op *op;
99957
99958 +#ifdef CONFIG_GRKERNSEC_HIDESYM
99959 + seq_printf(m, ">>> socket %p", NULL);
99960 + seq_printf(m, " / sk %p", NULL);
99961 + seq_printf(m, " / bo %p", NULL);
99962 +#else
99963 seq_printf(m, ">>> socket %p", sk->sk_socket);
99964 seq_printf(m, " / sk %p", sk);
99965 seq_printf(m, " / bo %p", bo);
99966 +#endif
99967 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
99968 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
99969 seq_printf(m, " <<<\n");
99970 diff --git a/net/compat.c b/net/compat.c
99971 index 9559afc..ccd74e1 100644
99972 --- a/net/compat.c
99973 +++ b/net/compat.c
99974 @@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
99975 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
99976 __get_user(kmsg->msg_flags, &umsg->msg_flags))
99977 return -EFAULT;
99978 - kmsg->msg_name = compat_ptr(tmp1);
99979 - kmsg->msg_iov = compat_ptr(tmp2);
99980 - kmsg->msg_control = compat_ptr(tmp3);
99981 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
99982 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
99983 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
99984 return 0;
99985 }
99986
99987 @@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
99988 kern_msg->msg_name = NULL;
99989
99990 tot_len = iov_from_user_compat_to_kern(kern_iov,
99991 - (struct compat_iovec __user *)kern_msg->msg_iov,
99992 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
99993 kern_msg->msg_iovlen);
99994 if (tot_len >= 0)
99995 kern_msg->msg_iov = kern_iov;
99996 @@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
99997
99998 #define CMSG_COMPAT_FIRSTHDR(msg) \
99999 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
100000 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
100001 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
100002 (struct compat_cmsghdr __user *)NULL)
100003
100004 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
100005 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
100006 (ucmlen) <= (unsigned long) \
100007 ((mhdr)->msg_controllen - \
100008 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
100009 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
100010
100011 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
100012 struct compat_cmsghdr __user *cmsg, int cmsg_len)
100013 {
100014 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
100015 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
100016 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
100017 msg->msg_controllen)
100018 return NULL;
100019 return (struct compat_cmsghdr __user *)ptr;
100020 @@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
100021 {
100022 struct compat_timeval ctv;
100023 struct compat_timespec cts[3];
100024 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
100025 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
100026 struct compat_cmsghdr cmhdr;
100027 int cmlen;
100028
100029 @@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
100030
100031 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
100032 {
100033 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
100034 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
100035 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
100036 int fdnum = scm->fp->count;
100037 struct file **fp = scm->fp->fp;
100038 @@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
100039 len = sizeof(ktime);
100040 old_fs = get_fs();
100041 set_fs(KERNEL_DS);
100042 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
100043 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
100044 set_fs(old_fs);
100045
100046 if (!err) {
100047 @@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
100048 case MCAST_JOIN_GROUP:
100049 case MCAST_LEAVE_GROUP:
100050 {
100051 - struct compat_group_req __user *gr32 = (void *)optval;
100052 + struct compat_group_req __user *gr32 = (void __user *)optval;
100053 struct group_req __user *kgr =
100054 compat_alloc_user_space(sizeof(struct group_req));
100055 u32 interface;
100056 @@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
100057 case MCAST_BLOCK_SOURCE:
100058 case MCAST_UNBLOCK_SOURCE:
100059 {
100060 - struct compat_group_source_req __user *gsr32 = (void *)optval;
100061 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
100062 struct group_source_req __user *kgsr = compat_alloc_user_space(
100063 sizeof(struct group_source_req));
100064 u32 interface;
100065 @@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
100066 }
100067 case MCAST_MSFILTER:
100068 {
100069 - struct compat_group_filter __user *gf32 = (void *)optval;
100070 + struct compat_group_filter __user *gf32 = (void __user *)optval;
100071 struct group_filter __user *kgf;
100072 u32 interface, fmode, numsrc;
100073
100074 diff --git a/net/core/dev.c b/net/core/dev.c
100075 index 84a0705..575db4c 100644
100076 --- a/net/core/dev.c
100077 +++ b/net/core/dev.c
100078 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
100079 if (no_module && capable(CAP_NET_ADMIN))
100080 no_module = request_module("netdev-%s", name);
100081 if (no_module && capable(CAP_SYS_MODULE)) {
100082 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
100083 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
100084 +#else
100085 if (!request_module("%s", name))
100086 pr_err("Loading kernel module for a network device "
100087 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
100088 "instead\n", name);
100089 +#endif
100090 }
100091 }
100092 EXPORT_SYMBOL(dev_load);
100093 @@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
100094
100095 struct dev_gso_cb {
100096 void (*destructor)(struct sk_buff *skb);
100097 -};
100098 +} __no_const;
100099
100100 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
100101
100102 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
100103 }
100104 EXPORT_SYMBOL(netif_rx_ni);
100105
100106 -static void net_tx_action(struct softirq_action *h)
100107 +static void net_tx_action(void)
100108 {
100109 struct softnet_data *sd = &__get_cpu_var(softnet_data);
100110
100111 @@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
100112 EXPORT_SYMBOL(netif_napi_del);
100113
100114
100115 -static void net_rx_action(struct softirq_action *h)
100116 +static void net_rx_action(void)
100117 {
100118 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
100119 unsigned long time_limit = jiffies + 2;
100120 diff --git a/net/core/flow.c b/net/core/flow.c
100121 index 9601587..8c4824e 100644
100122 --- a/net/core/flow.c
100123 +++ b/net/core/flow.c
100124 @@ -35,11 +35,11 @@ struct flow_cache_entry {
100125 atomic_t *object_ref;
100126 };
100127
100128 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
100129 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
100130
100131 static u32 flow_hash_shift;
100132 #define flow_hash_size (1 << flow_hash_shift)
100133 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
100134 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
100135
100136 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
100137
100138 @@ -52,7 +52,7 @@ struct flow_percpu_info {
100139 u32 hash_rnd;
100140 int count;
100141 };
100142 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
100143 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
100144
100145 #define flow_hash_rnd_recalc(cpu) \
100146 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
100147 @@ -69,7 +69,7 @@ struct flow_flush_info {
100148 atomic_t cpuleft;
100149 struct completion completion;
100150 };
100151 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
100152 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
100153
100154 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
100155
100156 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
100157 if (fle->family == family &&
100158 fle->dir == dir &&
100159 flow_key_compare(key, &fle->key) == 0) {
100160 - if (fle->genid == atomic_read(&flow_cache_genid)) {
100161 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
100162 void *ret = fle->object;
100163
100164 if (ret)
100165 @@ -228,7 +228,7 @@ nocache:
100166 err = resolver(net, key, family, dir, &obj, &obj_ref);
100167
100168 if (fle && !err) {
100169 - fle->genid = atomic_read(&flow_cache_genid);
100170 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
100171
100172 if (fle->object)
100173 atomic_dec(fle->object_ref);
100174 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
100175
100176 fle = flow_table(cpu)[i];
100177 for (; fle; fle = fle->next) {
100178 - unsigned genid = atomic_read(&flow_cache_genid);
100179 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
100180
100181 if (!fle->object || fle->genid == genid)
100182 continue;
100183 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
100184 index d4fd895..ac9b1e6 100644
100185 --- a/net/core/rtnetlink.c
100186 +++ b/net/core/rtnetlink.c
100187 @@ -57,7 +57,7 @@ struct rtnl_link
100188 {
100189 rtnl_doit_func doit;
100190 rtnl_dumpit_func dumpit;
100191 -};
100192 +} __no_const;
100193
100194 static DEFINE_MUTEX(rtnl_mutex);
100195
100196 diff --git a/net/core/scm.c b/net/core/scm.c
100197 index d98eafc..1a190a9 100644
100198 --- a/net/core/scm.c
100199 +++ b/net/core/scm.c
100200 @@ -191,7 +191,7 @@ error:
100201 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
100202 {
100203 struct cmsghdr __user *cm
100204 - = (__force struct cmsghdr __user *)msg->msg_control;
100205 + = (struct cmsghdr __force_user *)msg->msg_control;
100206 struct cmsghdr cmhdr;
100207 int cmlen = CMSG_LEN(len);
100208 int err;
100209 @@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
100210 err = -EFAULT;
100211 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
100212 goto out;
100213 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
100214 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
100215 goto out;
100216 cmlen = CMSG_SPACE(len);
100217 if (msg->msg_controllen < cmlen)
100218 @@ -229,7 +229,7 @@ out:
100219 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
100220 {
100221 struct cmsghdr __user *cm
100222 - = (__force struct cmsghdr __user*)msg->msg_control;
100223 + = (struct cmsghdr __force_user *)msg->msg_control;
100224
100225 int fdmax = 0;
100226 int fdnum = scm->fp->count;
100227 @@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
100228 if (fdnum < fdmax)
100229 fdmax = fdnum;
100230
100231 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
100232 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
100233 i++, cmfptr++)
100234 {
100235 int new_fd;
100236 diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
100237 index 45329d7..626aaa6 100644
100238 --- a/net/core/secure_seq.c
100239 +++ b/net/core/secure_seq.c
100240 @@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
100241 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
100242
100243 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
100244 - __be16 dport)
100245 + __be16 dport)
100246 {
100247 u32 secret[MD5_MESSAGE_BYTES / 4];
100248 u32 hash[MD5_DIGEST_WORDS];
100249 @@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
100250 secret[i] = net_secret[i];
100251
100252 md5_transform(hash, secret);
100253 -
100254 return hash[0];
100255 }
100256 #endif
100257 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
100258 index 025f924..70a71c4 100644
100259 --- a/net/core/skbuff.c
100260 +++ b/net/core/skbuff.c
100261 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
100262 struct sk_buff *frag_iter;
100263 struct sock *sk = skb->sk;
100264
100265 + pax_track_stack();
100266 +
100267 /*
100268 * __skb_splice_bits() only fails if the output has no room left,
100269 * so no point in going over the frag_list for the error case.
100270 diff --git a/net/core/sock.c b/net/core/sock.c
100271 index 6605e75..3acebda 100644
100272 --- a/net/core/sock.c
100273 +++ b/net/core/sock.c
100274 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
100275 break;
100276
100277 case SO_PEERCRED:
100278 + {
100279 + struct ucred peercred;
100280 if (len > sizeof(sk->sk_peercred))
100281 len = sizeof(sk->sk_peercred);
100282 - if (copy_to_user(optval, &sk->sk_peercred, len))
100283 + peercred = sk->sk_peercred;
100284 + if (copy_to_user(optval, &peercred, len))
100285 return -EFAULT;
100286 goto lenout;
100287 + }
100288
100289 case SO_PEERNAME:
100290 {
100291 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
100292 */
100293 smp_wmb();
100294 atomic_set(&sk->sk_refcnt, 1);
100295 - atomic_set(&sk->sk_drops, 0);
100296 + atomic_set_unchecked(&sk->sk_drops, 0);
100297 }
100298 EXPORT_SYMBOL(sock_init_data);
100299
100300 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
100301 index 2036568..c55883d 100644
100302 --- a/net/decnet/sysctl_net_decnet.c
100303 +++ b/net/decnet/sysctl_net_decnet.c
100304 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
100305
100306 if (len > *lenp) len = *lenp;
100307
100308 - if (copy_to_user(buffer, addr, len))
100309 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
100310 return -EFAULT;
100311
100312 *lenp = len;
100313 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
100314
100315 if (len > *lenp) len = *lenp;
100316
100317 - if (copy_to_user(buffer, devname, len))
100318 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
100319 return -EFAULT;
100320
100321 *lenp = len;
100322 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
100323 index 39a2d29..f39c0fe 100644
100324 --- a/net/econet/Kconfig
100325 +++ b/net/econet/Kconfig
100326 @@ -4,7 +4,7 @@
100327
100328 config ECONET
100329 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
100330 - depends on EXPERIMENTAL && INET
100331 + depends on EXPERIMENTAL && INET && BROKEN
100332 ---help---
100333 Econet is a fairly old and slow networking protocol mainly used by
100334 Acorn computers to access file and print servers. It uses native
100335 diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
100336 index a413b1b..380849c 100644
100337 --- a/net/ieee802154/dgram.c
100338 +++ b/net/ieee802154/dgram.c
100339 @@ -318,7 +318,7 @@ out:
100340 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
100341 {
100342 if (sock_queue_rcv_skb(sk, skb) < 0) {
100343 - atomic_inc(&sk->sk_drops);
100344 + atomic_inc_unchecked(&sk->sk_drops);
100345 kfree_skb(skb);
100346 return NET_RX_DROP;
100347 }
100348 diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
100349 index 30e74ee..bfc6ee0 100644
100350 --- a/net/ieee802154/raw.c
100351 +++ b/net/ieee802154/raw.c
100352 @@ -206,7 +206,7 @@ out:
100353 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
100354 {
100355 if (sock_queue_rcv_skb(sk, skb) < 0) {
100356 - atomic_inc(&sk->sk_drops);
100357 + atomic_inc_unchecked(&sk->sk_drops);
100358 kfree_skb(skb);
100359 return NET_RX_DROP;
100360 }
100361 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
100362 index dba56d2..acee5d6 100644
100363 --- a/net/ipv4/inet_diag.c
100364 +++ b/net/ipv4/inet_diag.c
100365 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
100366 r->idiag_retrans = 0;
100367
100368 r->id.idiag_if = sk->sk_bound_dev_if;
100369 +#ifdef CONFIG_GRKERNSEC_HIDESYM
100370 + r->id.idiag_cookie[0] = 0;
100371 + r->id.idiag_cookie[1] = 0;
100372 +#else
100373 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
100374 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
100375 +#endif
100376
100377 r->id.idiag_sport = inet->sport;
100378 r->id.idiag_dport = inet->dport;
100379 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
100380 r->idiag_family = tw->tw_family;
100381 r->idiag_retrans = 0;
100382 r->id.idiag_if = tw->tw_bound_dev_if;
100383 +
100384 +#ifdef CONFIG_GRKERNSEC_HIDESYM
100385 + r->id.idiag_cookie[0] = 0;
100386 + r->id.idiag_cookie[1] = 0;
100387 +#else
100388 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
100389 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
100390 +#endif
100391 +
100392 r->id.idiag_sport = tw->tw_sport;
100393 r->id.idiag_dport = tw->tw_dport;
100394 r->id.idiag_src[0] = tw->tw_rcv_saddr;
100395 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
100396 if (sk == NULL)
100397 goto unlock;
100398
100399 +#ifndef CONFIG_GRKERNSEC_HIDESYM
100400 err = -ESTALE;
100401 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
100402 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
100403 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
100404 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
100405 goto out;
100406 +#endif
100407
100408 err = -ENOMEM;
100409 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
100410 @@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
100411 r->idiag_retrans = req->retrans;
100412
100413 r->id.idiag_if = sk->sk_bound_dev_if;
100414 +
100415 +#ifdef CONFIG_GRKERNSEC_HIDESYM
100416 + r->id.idiag_cookie[0] = 0;
100417 + r->id.idiag_cookie[1] = 0;
100418 +#else
100419 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
100420 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
100421 +#endif
100422
100423 tmo = req->expires - jiffies;
100424 if (tmo < 0)
100425 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
100426 index d717267..56de7e7 100644
100427 --- a/net/ipv4/inet_hashtables.c
100428 +++ b/net/ipv4/inet_hashtables.c
100429 @@ -18,12 +18,15 @@
100430 #include <linux/sched.h>
100431 #include <linux/slab.h>
100432 #include <linux/wait.h>
100433 +#include <linux/security.h>
100434
100435 #include <net/inet_connection_sock.h>
100436 #include <net/inet_hashtables.h>
100437 #include <net/secure_seq.h>
100438 #include <net/ip.h>
100439
100440 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
100441 +
100442 /*
100443 * Allocate and initialize a new local port bind bucket.
100444 * The bindhash mutex for snum's hash chain must be held here.
100445 @@ -491,6 +494,8 @@ ok:
100446 }
100447 spin_unlock(&head->lock);
100448
100449 + gr_update_task_in_ip_table(current, inet_sk(sk));
100450 +
100451 if (tw) {
100452 inet_twsk_deschedule(tw, death_row);
100453 inet_twsk_put(tw);
100454 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
100455 index 13b229f..6956484 100644
100456 --- a/net/ipv4/inetpeer.c
100457 +++ b/net/ipv4/inetpeer.c
100458 @@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
100459 struct inet_peer *p, *n;
100460 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
100461
100462 + pax_track_stack();
100463 +
100464 /* Look up for the address quickly. */
100465 read_lock_bh(&peer_pool_lock);
100466 p = lookup(daddr, NULL);
100467 @@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
100468 return NULL;
100469 n->v4daddr = daddr;
100470 atomic_set(&n->refcnt, 1);
100471 - atomic_set(&n->rid, 0);
100472 + atomic_set_unchecked(&n->rid, 0);
100473 n->ip_id_count = secure_ip_id(daddr);
100474 n->tcp_ts_stamp = 0;
100475
100476 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
100477 index d3fe10b..feeafc9 100644
100478 --- a/net/ipv4/ip_fragment.c
100479 +++ b/net/ipv4/ip_fragment.c
100480 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
100481 return 0;
100482
100483 start = qp->rid;
100484 - end = atomic_inc_return(&peer->rid);
100485 + end = atomic_inc_return_unchecked(&peer->rid);
100486 qp->rid = end;
100487
100488 rc = qp->q.fragments && (end - start) > max;
100489 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
100490 index e982b5c..f079d75 100644
100491 --- a/net/ipv4/ip_sockglue.c
100492 +++ b/net/ipv4/ip_sockglue.c
100493 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
100494 int val;
100495 int len;
100496
100497 + pax_track_stack();
100498 +
100499 if (level != SOL_IP)
100500 return -EOPNOTSUPP;
100501
100502 @@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
100503 if (sk->sk_type != SOCK_STREAM)
100504 return -ENOPROTOOPT;
100505
100506 - msg.msg_control = optval;
100507 + msg.msg_control = (void __force_kernel *)optval;
100508 msg.msg_controllen = len;
100509 msg.msg_flags = 0;
100510
100511 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
100512 index f8d04c2..c1188f2 100644
100513 --- a/net/ipv4/ipconfig.c
100514 +++ b/net/ipv4/ipconfig.c
100515 @@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
100516
100517 mm_segment_t oldfs = get_fs();
100518 set_fs(get_ds());
100519 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
100520 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
100521 set_fs(oldfs);
100522 return res;
100523 }
100524 @@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
100525
100526 mm_segment_t oldfs = get_fs();
100527 set_fs(get_ds());
100528 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
100529 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
100530 set_fs(oldfs);
100531 return res;
100532 }
100533 @@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
100534
100535 mm_segment_t oldfs = get_fs();
100536 set_fs(get_ds());
100537 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
100538 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
100539 set_fs(oldfs);
100540 return res;
100541 }
100542 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
100543 index c8b0cc3..05e4007 100644
100544 --- a/net/ipv4/netfilter/arp_tables.c
100545 +++ b/net/ipv4/netfilter/arp_tables.c
100546 @@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
100547 private = &tmp;
100548 }
100549 #endif
100550 + memset(&info, 0, sizeof(info));
100551 info.valid_hooks = t->valid_hooks;
100552 memcpy(info.hook_entry, private->hook_entry,
100553 sizeof(info.hook_entry));
100554 @@ -1003,6 +1004,11 @@ static int __do_replace(struct net *net, const char *name,
100555 unsigned int valid_hooks,
100556 struct xt_table_info *newinfo,
100557 unsigned int num_counters,
100558 + void __user *counters_ptr) __size_overflow(5);
100559 +static int __do_replace(struct net *net, const char *name,
100560 + unsigned int valid_hooks,
100561 + struct xt_table_info *newinfo,
100562 + unsigned int num_counters,
100563 void __user *counters_ptr)
100564 {
100565 int ret;
100566 @@ -1135,6 +1141,8 @@ add_counter_to_entry(struct arpt_entry *e,
100567 }
100568
100569 static int do_add_counters(struct net *net, void __user *user, unsigned int len,
100570 + int compat) __size_overflow(3);
100571 +static int do_add_counters(struct net *net, void __user *user, unsigned int len,
100572 int compat)
100573 {
100574 unsigned int i, curcpu;
100575 diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
100576 index c156db2..e772975 100644
100577 --- a/net/ipv4/netfilter/ip_queue.c
100578 +++ b/net/ipv4/netfilter/ip_queue.c
100579 @@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
100580
100581 if (v->data_len < sizeof(*user_iph))
100582 return 0;
100583 + if (v->data_len > 65535)
100584 + return -EMSGSIZE;
100585 +
100586 diff = v->data_len - e->skb->len;
100587 if (diff < 0) {
100588 if (pskb_trim(e->skb, v->data_len))
100589 @@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
100590 static inline void
100591 __ipq_rcv_skb(struct sk_buff *skb)
100592 {
100593 - int status, type, pid, flags, nlmsglen, skblen;
100594 + int status, type, pid, flags;
100595 + unsigned int nlmsglen, skblen;
100596 struct nlmsghdr *nlh;
100597
100598 skblen = skb->len;
100599 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
100600 index 0606db1..918b88a 100644
100601 --- a/net/ipv4/netfilter/ip_tables.c
100602 +++ b/net/ipv4/netfilter/ip_tables.c
100603 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
100604 private = &tmp;
100605 }
100606 #endif
100607 + memset(&info, 0, sizeof(info));
100608 info.valid_hooks = t->valid_hooks;
100609 memcpy(info.hook_entry, private->hook_entry,
100610 sizeof(info.hook_entry));
100611 @@ -1208,6 +1209,10 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
100612 static int
100613 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
100614 struct xt_table_info *newinfo, unsigned int num_counters,
100615 + void __user *counters_ptr) __size_overflow(5);
100616 +static int
100617 +__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
100618 + struct xt_table_info *newinfo, unsigned int num_counters,
100619 void __user *counters_ptr)
100620 {
100621 int ret;
100622 @@ -1339,6 +1344,8 @@ add_counter_to_entry(struct ipt_entry *e,
100623 }
100624
100625 static int
100626 +do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) __size_overflow(3);
100627 +static int
100628 do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
100629 {
100630 unsigned int i, curcpu;
100631 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
100632 index d9521f6..127fa44 100644
100633 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
100634 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
100635 @@ -436,6 +436,10 @@ static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
100636 static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
100637 unsigned char *eoc,
100638 unsigned long **oid,
100639 + unsigned int *len) __size_overflow(2);
100640 +static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
100641 + unsigned char *eoc,
100642 + unsigned long **oid,
100643 unsigned int *len)
100644 {
100645 unsigned long subid;
100646 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
100647 index ab996f9..3da5f96 100644
100648 --- a/net/ipv4/raw.c
100649 +++ b/net/ipv4/raw.c
100650 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
100651 /* Charge it to the socket. */
100652
100653 if (sock_queue_rcv_skb(sk, skb) < 0) {
100654 - atomic_inc(&sk->sk_drops);
100655 + atomic_inc_unchecked(&sk->sk_drops);
100656 kfree_skb(skb);
100657 return NET_RX_DROP;
100658 }
100659 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
100660 int raw_rcv(struct sock *sk, struct sk_buff *skb)
100661 {
100662 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
100663 - atomic_inc(&sk->sk_drops);
100664 + atomic_inc_unchecked(&sk->sk_drops);
100665 kfree_skb(skb);
100666 return NET_RX_DROP;
100667 }
100668 @@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
100669
100670 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
100671 {
100672 + struct icmp_filter filter;
100673 +
100674 + if (optlen < 0)
100675 + return -EINVAL;
100676 if (optlen > sizeof(struct icmp_filter))
100677 optlen = sizeof(struct icmp_filter);
100678 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
100679 + if (copy_from_user(&filter, optval, optlen))
100680 return -EFAULT;
100681 + raw_sk(sk)->filter = filter;
100682 +
100683 return 0;
100684 }
100685
100686 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
100687 {
100688 int len, ret = -EFAULT;
100689 + struct icmp_filter filter;
100690
100691 if (get_user(len, optlen))
100692 goto out;
100693 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
100694 if (len > sizeof(struct icmp_filter))
100695 len = sizeof(struct icmp_filter);
100696 ret = -EFAULT;
100697 - if (put_user(len, optlen) ||
100698 - copy_to_user(optval, &raw_sk(sk)->filter, len))
100699 + filter = raw_sk(sk)->filter;
100700 + if (put_user(len, optlen) || len > sizeof filter ||
100701 + copy_to_user(optval, &filter, len))
100702 goto out;
100703 ret = 0;
100704 out: return ret;
100705 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
100706 sk_wmem_alloc_get(sp),
100707 sk_rmem_alloc_get(sp),
100708 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
100709 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
100710 + atomic_read(&sp->sk_refcnt),
100711 +#ifdef CONFIG_GRKERNSEC_HIDESYM
100712 + NULL,
100713 +#else
100714 + sp,
100715 +#endif
100716 + atomic_read_unchecked(&sp->sk_drops));
100717 }
100718
100719 static int raw_seq_show(struct seq_file *seq, void *v)
100720 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
100721 index 58f141b..b759702 100644
100722 --- a/net/ipv4/route.c
100723 +++ b/net/ipv4/route.c
100724 @@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
100725
100726 static inline int rt_genid(struct net *net)
100727 {
100728 - return atomic_read(&net->ipv4.rt_genid);
100729 + return atomic_read_unchecked(&net->ipv4.rt_genid);
100730 }
100731
100732 #ifdef CONFIG_PROC_FS
100733 @@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
100734 unsigned char shuffle;
100735
100736 get_random_bytes(&shuffle, sizeof(shuffle));
100737 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
100738 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
100739 }
100740
100741 /*
100742 @@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
100743
100744 static __net_init int rt_secret_timer_init(struct net *net)
100745 {
100746 - atomic_set(&net->ipv4.rt_genid,
100747 + atomic_set_unchecked(&net->ipv4.rt_genid,
100748 (int) ((num_physpages ^ (num_physpages>>8)) ^
100749 (jiffies ^ (jiffies >> 7))));
100750
100751 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
100752 index f095659..adc892a 100644
100753 --- a/net/ipv4/tcp.c
100754 +++ b/net/ipv4/tcp.c
100755 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
100756 int val;
100757 int err = 0;
100758
100759 + pax_track_stack();
100760 +
100761 /* This is a string value all the others are int's */
100762 if (optname == TCP_CONGESTION) {
100763 char name[TCP_CA_NAME_MAX];
100764 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
100765 struct tcp_sock *tp = tcp_sk(sk);
100766 int val, len;
100767
100768 + pax_track_stack();
100769 +
100770 if (get_user(len, optlen))
100771 return -EFAULT;
100772
100773 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
100774 index 6fc7961..33bad4a 100644
100775 --- a/net/ipv4/tcp_ipv4.c
100776 +++ b/net/ipv4/tcp_ipv4.c
100777 @@ -85,6 +85,9 @@
100778 int sysctl_tcp_tw_reuse __read_mostly;
100779 int sysctl_tcp_low_latency __read_mostly;
100780
100781 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100782 +extern int grsec_enable_blackhole;
100783 +#endif
100784
100785 #ifdef CONFIG_TCP_MD5SIG
100786 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
100787 @@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
100788 return 0;
100789
100790 reset:
100791 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100792 + if (!grsec_enable_blackhole)
100793 +#endif
100794 tcp_v4_send_reset(rsk, skb);
100795 discard:
100796 kfree_skb(skb);
100797 @@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
100798 TCP_SKB_CB(skb)->sacked = 0;
100799
100800 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
100801 - if (!sk)
100802 + if (!sk) {
100803 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100804 + ret = 1;
100805 +#endif
100806 goto no_tcp_socket;
100807 + }
100808
100809 process:
100810 - if (sk->sk_state == TCP_TIME_WAIT)
100811 + if (sk->sk_state == TCP_TIME_WAIT) {
100812 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100813 + ret = 2;
100814 +#endif
100815 goto do_time_wait;
100816 + }
100817
100818 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
100819 goto discard_and_relse;
100820 @@ -1651,6 +1665,10 @@ no_tcp_socket:
100821 bad_packet:
100822 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
100823 } else {
100824 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100825 + if (!grsec_enable_blackhole || (ret == 1 &&
100826 + (skb->dev->flags & IFF_LOOPBACK)))
100827 +#endif
100828 tcp_v4_send_reset(NULL, skb);
100829 }
100830
100831 @@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
100832 0, /* non standard timer */
100833 0, /* open_requests have no inode */
100834 atomic_read(&sk->sk_refcnt),
100835 +#ifdef CONFIG_GRKERNSEC_HIDESYM
100836 + NULL,
100837 +#else
100838 req,
100839 +#endif
100840 len);
100841 }
100842
100843 @@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
100844 sock_i_uid(sk),
100845 icsk->icsk_probes_out,
100846 sock_i_ino(sk),
100847 - atomic_read(&sk->sk_refcnt), sk,
100848 + atomic_read(&sk->sk_refcnt),
100849 +#ifdef CONFIG_GRKERNSEC_HIDESYM
100850 + NULL,
100851 +#else
100852 + sk,
100853 +#endif
100854 jiffies_to_clock_t(icsk->icsk_rto),
100855 jiffies_to_clock_t(icsk->icsk_ack.ato),
100856 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
100857 @@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
100858 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
100859 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
100860 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
100861 - atomic_read(&tw->tw_refcnt), tw, len);
100862 + atomic_read(&tw->tw_refcnt),
100863 +#ifdef CONFIG_GRKERNSEC_HIDESYM
100864 + NULL,
100865 +#else
100866 + tw,
100867 +#endif
100868 + len);
100869 }
100870
100871 #define TMPSZ 150
100872 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
100873 index 4c03598..e09a8e8 100644
100874 --- a/net/ipv4/tcp_minisocks.c
100875 +++ b/net/ipv4/tcp_minisocks.c
100876 @@ -26,6 +26,10 @@
100877 #include <net/inet_common.h>
100878 #include <net/xfrm.h>
100879
100880 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100881 +extern int grsec_enable_blackhole;
100882 +#endif
100883 +
100884 #ifdef CONFIG_SYSCTL
100885 #define SYNC_INIT 0 /* let the user enable it */
100886 #else
100887 @@ -672,6 +676,10 @@ listen_overflow:
100888
100889 embryonic_reset:
100890 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
100891 +
100892 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100893 + if (!grsec_enable_blackhole)
100894 +#endif
100895 if (!(flg & TCP_FLAG_RST))
100896 req->rsk_ops->send_reset(sk, skb);
100897
100898 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
100899 index af83bdf..ec91cb2 100644
100900 --- a/net/ipv4/tcp_output.c
100901 +++ b/net/ipv4/tcp_output.c
100902 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
100903 __u8 *md5_hash_location;
100904 int mss;
100905
100906 + pax_track_stack();
100907 +
100908 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
100909 if (skb == NULL)
100910 return NULL;
100911 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
100912 index 59f5b5e..193860f 100644
100913 --- a/net/ipv4/tcp_probe.c
100914 +++ b/net/ipv4/tcp_probe.c
100915 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
100916 if (cnt + width >= len)
100917 break;
100918
100919 - if (copy_to_user(buf + cnt, tbuf, width))
100920 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
100921 return -EFAULT;
100922 cnt += width;
100923 }
100924 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
100925 index 57d5501..a9ed13a 100644
100926 --- a/net/ipv4/tcp_timer.c
100927 +++ b/net/ipv4/tcp_timer.c
100928 @@ -21,6 +21,10 @@
100929 #include <linux/module.h>
100930 #include <net/tcp.h>
100931
100932 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100933 +extern int grsec_lastack_retries;
100934 +#endif
100935 +
100936 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
100937 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
100938 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
100939 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
100940 }
100941 }
100942
100943 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100944 + if ((sk->sk_state == TCP_LAST_ACK) &&
100945 + (grsec_lastack_retries > 0) &&
100946 + (grsec_lastack_retries < retry_until))
100947 + retry_until = grsec_lastack_retries;
100948 +#endif
100949 +
100950 if (retransmits_timed_out(sk, retry_until)) {
100951 /* Has it gone just too far? */
100952 tcp_write_err(sk);
100953 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
100954 index 8e28770..72105c8 100644
100955 --- a/net/ipv4/udp.c
100956 +++ b/net/ipv4/udp.c
100957 @@ -86,6 +86,7 @@
100958 #include <linux/types.h>
100959 #include <linux/fcntl.h>
100960 #include <linux/module.h>
100961 +#include <linux/security.h>
100962 #include <linux/socket.h>
100963 #include <linux/sockios.h>
100964 #include <linux/igmp.h>
100965 @@ -106,6 +107,10 @@
100966 #include <net/xfrm.h>
100967 #include "udp_impl.h"
100968
100969 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100970 +extern int grsec_enable_blackhole;
100971 +#endif
100972 +
100973 struct udp_table udp_table;
100974 EXPORT_SYMBOL(udp_table);
100975
100976 @@ -371,6 +376,9 @@ found:
100977 return s;
100978 }
100979
100980 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
100981 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
100982 +
100983 /*
100984 * This routine is called by the ICMP module when it gets some
100985 * sort of error condition. If err < 0 then the socket should
100986 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
100987 dport = usin->sin_port;
100988 if (dport == 0)
100989 return -EINVAL;
100990 +
100991 + err = gr_search_udp_sendmsg(sk, usin);
100992 + if (err)
100993 + return err;
100994 } else {
100995 if (sk->sk_state != TCP_ESTABLISHED)
100996 return -EDESTADDRREQ;
100997 +
100998 + err = gr_search_udp_sendmsg(sk, NULL);
100999 + if (err)
101000 + return err;
101001 +
101002 daddr = inet->daddr;
101003 dport = inet->dport;
101004 /* Open fast path for connected socket.
101005 @@ -945,6 +962,10 @@ try_again:
101006 if (!skb)
101007 goto out;
101008
101009 + err = gr_search_udp_recvmsg(sk, skb);
101010 + if (err)
101011 + goto out_free;
101012 +
101013 ulen = skb->len - sizeof(struct udphdr);
101014 copied = len;
101015 if (copied > ulen)
101016 @@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101017 if (rc == -ENOMEM) {
101018 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
101019 is_udplite);
101020 - atomic_inc(&sk->sk_drops);
101021 + atomic_inc_unchecked(&sk->sk_drops);
101022 }
101023 goto drop;
101024 }
101025 @@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
101026 goto csum_error;
101027
101028 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
101029 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101030 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
101031 +#endif
101032 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
101033
101034 /*
101035 @@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
101036 sk_wmem_alloc_get(sp),
101037 sk_rmem_alloc_get(sp),
101038 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
101039 - atomic_read(&sp->sk_refcnt), sp,
101040 - atomic_read(&sp->sk_drops), len);
101041 + atomic_read(&sp->sk_refcnt),
101042 +#ifdef CONFIG_GRKERNSEC_HIDESYM
101043 + NULL,
101044 +#else
101045 + sp,
101046 +#endif
101047 + atomic_read_unchecked(&sp->sk_drops), len);
101048 }
101049
101050 int udp4_seq_show(struct seq_file *seq, void *v)
101051 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
101052 index 8ac3d09..fc58c5f 100644
101053 --- a/net/ipv6/addrconf.c
101054 +++ b/net/ipv6/addrconf.c
101055 @@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
101056 p.iph.ihl = 5;
101057 p.iph.protocol = IPPROTO_IPV6;
101058 p.iph.ttl = 64;
101059 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
101060 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
101061
101062 if (ops->ndo_do_ioctl) {
101063 mm_segment_t oldfs = get_fs();
101064 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
101065 index cc4797d..7cfdfcc 100644
101066 --- a/net/ipv6/inet6_connection_sock.c
101067 +++ b/net/ipv6/inet6_connection_sock.c
101068 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
101069 #ifdef CONFIG_XFRM
101070 {
101071 struct rt6_info *rt = (struct rt6_info *)dst;
101072 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
101073 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
101074 }
101075 #endif
101076 }
101077 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
101078 #ifdef CONFIG_XFRM
101079 if (dst) {
101080 struct rt6_info *rt = (struct rt6_info *)dst;
101081 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
101082 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
101083 sk->sk_dst_cache = NULL;
101084 dst_release(dst);
101085 dst = NULL;
101086 diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
101087 index 093e9b2..f72cddb 100644
101088 --- a/net/ipv6/inet6_hashtables.c
101089 +++ b/net/ipv6/inet6_hashtables.c
101090 @@ -119,7 +119,7 @@ out:
101091 }
101092 EXPORT_SYMBOL(__inet6_lookup_established);
101093
101094 -static int inline compute_score(struct sock *sk, struct net *net,
101095 +static inline int compute_score(struct sock *sk, struct net *net,
101096 const unsigned short hnum,
101097 const struct in6_addr *daddr,
101098 const int dif)
101099 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
101100 index 4f7aaf6..f7acf45 100644
101101 --- a/net/ipv6/ipv6_sockglue.c
101102 +++ b/net/ipv6/ipv6_sockglue.c
101103 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
101104 int val, valbool;
101105 int retv = -ENOPROTOOPT;
101106
101107 + pax_track_stack();
101108 +
101109 if (optval == NULL)
101110 val=0;
101111 else {
101112 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
101113 int len;
101114 int val;
101115
101116 + pax_track_stack();
101117 +
101118 if (ip6_mroute_opt(optname))
101119 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
101120
101121 @@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
101122 if (sk->sk_type != SOCK_STREAM)
101123 return -ENOPROTOOPT;
101124
101125 - msg.msg_control = optval;
101126 + msg.msg_control = (void __force_kernel *)optval;
101127 msg.msg_controllen = len;
101128 msg.msg_flags = 0;
101129
101130 diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
101131 index 1cf3f0c..1d4376f 100644
101132 --- a/net/ipv6/netfilter/ip6_queue.c
101133 +++ b/net/ipv6/netfilter/ip6_queue.c
101134 @@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
101135
101136 if (v->data_len < sizeof(*user_iph))
101137 return 0;
101138 + if (v->data_len > 65535)
101139 + return -EMSGSIZE;
101140 +
101141 diff = v->data_len - e->skb->len;
101142 if (diff < 0) {
101143 if (pskb_trim(e->skb, v->data_len))
101144 @@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
101145 static inline void
101146 __ipq_rcv_skb(struct sk_buff *skb)
101147 {
101148 - int status, type, pid, flags, nlmsglen, skblen;
101149 + int status, type, pid, flags;
101150 + unsigned int nlmsglen, skblen;
101151 struct nlmsghdr *nlh;
101152
101153 skblen = skb->len;
101154 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
101155 index 78b5a36..2b9bb06 100644
101156 --- a/net/ipv6/netfilter/ip6_tables.c
101157 +++ b/net/ipv6/netfilter/ip6_tables.c
101158 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
101159 private = &tmp;
101160 }
101161 #endif
101162 + memset(&info, 0, sizeof(info));
101163 info.valid_hooks = t->valid_hooks;
101164 memcpy(info.hook_entry, private->hook_entry,
101165 sizeof(info.hook_entry));
101166 @@ -1240,6 +1241,10 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
101167 static int
101168 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
101169 struct xt_table_info *newinfo, unsigned int num_counters,
101170 + void __user *counters_ptr) __size_overflow(5);
101171 +static int
101172 +__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
101173 + struct xt_table_info *newinfo, unsigned int num_counters,
101174 void __user *counters_ptr)
101175 {
101176 int ret;
101177 @@ -1373,6 +1378,9 @@ add_counter_to_entry(struct ip6t_entry *e,
101178
101179 static int
101180 do_add_counters(struct net *net, void __user *user, unsigned int len,
101181 + int compat) __size_overflow(3);
101182 +static int
101183 +do_add_counters(struct net *net, void __user *user, unsigned int len,
101184 int compat)
101185 {
101186 unsigned int i, curcpu;
101187 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
101188 index 4f24570..b813b34 100644
101189 --- a/net/ipv6/raw.c
101190 +++ b/net/ipv6/raw.c
101191 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
101192 {
101193 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
101194 skb_checksum_complete(skb)) {
101195 - atomic_inc(&sk->sk_drops);
101196 + atomic_inc_unchecked(&sk->sk_drops);
101197 kfree_skb(skb);
101198 return NET_RX_DROP;
101199 }
101200
101201 /* Charge it to the socket. */
101202 if (sock_queue_rcv_skb(sk,skb)<0) {
101203 - atomic_inc(&sk->sk_drops);
101204 + atomic_inc_unchecked(&sk->sk_drops);
101205 kfree_skb(skb);
101206 return NET_RX_DROP;
101207 }
101208 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
101209 struct raw6_sock *rp = raw6_sk(sk);
101210
101211 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
101212 - atomic_inc(&sk->sk_drops);
101213 + atomic_inc_unchecked(&sk->sk_drops);
101214 kfree_skb(skb);
101215 return NET_RX_DROP;
101216 }
101217 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
101218
101219 if (inet->hdrincl) {
101220 if (skb_checksum_complete(skb)) {
101221 - atomic_inc(&sk->sk_drops);
101222 + atomic_inc_unchecked(&sk->sk_drops);
101223 kfree_skb(skb);
101224 return NET_RX_DROP;
101225 }
101226 @@ -518,7 +518,7 @@ csum_copy_err:
101227 as some normal condition.
101228 */
101229 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
101230 - atomic_inc(&sk->sk_drops);
101231 + atomic_inc_unchecked(&sk->sk_drops);
101232 goto out;
101233 }
101234
101235 @@ -600,7 +600,7 @@ out:
101236 return err;
101237 }
101238
101239 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
101240 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
101241 struct flowi *fl, struct rt6_info *rt,
101242 unsigned int flags)
101243 {
101244 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
101245 u16 proto;
101246 int err;
101247
101248 + pax_track_stack();
101249 +
101250 /* Rough check on arithmetic overflow,
101251 better check is made in ip6_append_data().
101252 */
101253 @@ -916,12 +918,17 @@ do_confirm:
101254 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
101255 char __user *optval, int optlen)
101256 {
101257 + struct icmp6_filter filter;
101258 +
101259 switch (optname) {
101260 case ICMPV6_FILTER:
101261 + if (optlen < 0)
101262 + return -EINVAL;
101263 if (optlen > sizeof(struct icmp6_filter))
101264 optlen = sizeof(struct icmp6_filter);
101265 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
101266 + if (copy_from_user(&filter, optval, optlen))
101267 return -EFAULT;
101268 + raw6_sk(sk)->filter = filter;
101269 return 0;
101270 default:
101271 return -ENOPROTOOPT;
101272 @@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
101273 char __user *optval, int __user *optlen)
101274 {
101275 int len;
101276 + struct icmp6_filter filter;
101277
101278 switch (optname) {
101279 case ICMPV6_FILTER:
101280 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
101281 len = sizeof(struct icmp6_filter);
101282 if (put_user(len, optlen))
101283 return -EFAULT;
101284 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
101285 + filter = raw6_sk(sk)->filter;
101286 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
101287 return -EFAULT;
101288 return 0;
101289 default:
101290 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
101291 0, 0L, 0,
101292 sock_i_uid(sp), 0,
101293 sock_i_ino(sp),
101294 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
101295 + atomic_read(&sp->sk_refcnt),
101296 +#ifdef CONFIG_GRKERNSEC_HIDESYM
101297 + NULL,
101298 +#else
101299 + sp,
101300 +#endif
101301 + atomic_read_unchecked(&sp->sk_drops));
101302 }
101303
101304 static int raw6_seq_show(struct seq_file *seq, void *v)
101305 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
101306 index faae6df..d4430c1 100644
101307 --- a/net/ipv6/tcp_ipv6.c
101308 +++ b/net/ipv6/tcp_ipv6.c
101309 @@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
101310 }
101311 #endif
101312
101313 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101314 +extern int grsec_enable_blackhole;
101315 +#endif
101316 +
101317 static void tcp_v6_hash(struct sock *sk)
101318 {
101319 if (sk->sk_state != TCP_CLOSE) {
101320 @@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
101321 return 0;
101322
101323 reset:
101324 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101325 + if (!grsec_enable_blackhole)
101326 +#endif
101327 tcp_v6_send_reset(sk, skb);
101328 discard:
101329 if (opt_skb)
101330 @@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
101331 TCP_SKB_CB(skb)->sacked = 0;
101332
101333 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
101334 - if (!sk)
101335 + if (!sk) {
101336 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101337 + ret = 1;
101338 +#endif
101339 goto no_tcp_socket;
101340 + }
101341
101342 process:
101343 - if (sk->sk_state == TCP_TIME_WAIT)
101344 + if (sk->sk_state == TCP_TIME_WAIT) {
101345 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101346 + ret = 2;
101347 +#endif
101348 goto do_time_wait;
101349 + }
101350
101351 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
101352 goto discard_and_relse;
101353 @@ -1701,6 +1716,10 @@ no_tcp_socket:
101354 bad_packet:
101355 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
101356 } else {
101357 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101358 + if (!grsec_enable_blackhole || (ret == 1 &&
101359 + (skb->dev->flags & IFF_LOOPBACK)))
101360 +#endif
101361 tcp_v6_send_reset(NULL, skb);
101362 }
101363
101364 @@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
101365 uid,
101366 0, /* non standard timer */
101367 0, /* open_requests have no inode */
101368 - 0, req);
101369 + 0,
101370 +#ifdef CONFIG_GRKERNSEC_HIDESYM
101371 + NULL
101372 +#else
101373 + req
101374 +#endif
101375 + );
101376 }
101377
101378 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
101379 @@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
101380 sock_i_uid(sp),
101381 icsk->icsk_probes_out,
101382 sock_i_ino(sp),
101383 - atomic_read(&sp->sk_refcnt), sp,
101384 + atomic_read(&sp->sk_refcnt),
101385 +#ifdef CONFIG_GRKERNSEC_HIDESYM
101386 + NULL,
101387 +#else
101388 + sp,
101389 +#endif
101390 jiffies_to_clock_t(icsk->icsk_rto),
101391 jiffies_to_clock_t(icsk->icsk_ack.ato),
101392 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
101393 @@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
101394 dest->s6_addr32[2], dest->s6_addr32[3], destp,
101395 tw->tw_substate, 0, 0,
101396 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
101397 - atomic_read(&tw->tw_refcnt), tw);
101398 + atomic_read(&tw->tw_refcnt),
101399 +#ifdef CONFIG_GRKERNSEC_HIDESYM
101400 + NULL
101401 +#else
101402 + tw
101403 +#endif
101404 + );
101405 }
101406
101407 static int tcp6_seq_show(struct seq_file *seq, void *v)
101408 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
101409 index 9cc6289..052c521 100644
101410 --- a/net/ipv6/udp.c
101411 +++ b/net/ipv6/udp.c
101412 @@ -49,6 +49,10 @@
101413 #include <linux/seq_file.h>
101414 #include "udp_impl.h"
101415
101416 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101417 +extern int grsec_enable_blackhole;
101418 +#endif
101419 +
101420 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
101421 {
101422 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
101423 @@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
101424 if (rc == -ENOMEM) {
101425 UDP6_INC_STATS_BH(sock_net(sk),
101426 UDP_MIB_RCVBUFERRORS, is_udplite);
101427 - atomic_inc(&sk->sk_drops);
101428 + atomic_inc_unchecked(&sk->sk_drops);
101429 }
101430 goto drop;
101431 }
101432 @@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
101433 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
101434 proto == IPPROTO_UDPLITE);
101435
101436 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101437 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
101438 +#endif
101439 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
101440
101441 kfree_skb(skb);
101442 @@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
101443 0, 0L, 0,
101444 sock_i_uid(sp), 0,
101445 sock_i_ino(sp),
101446 - atomic_read(&sp->sk_refcnt), sp,
101447 - atomic_read(&sp->sk_drops));
101448 + atomic_read(&sp->sk_refcnt),
101449 +#ifdef CONFIG_GRKERNSEC_HIDESYM
101450 + NULL,
101451 +#else
101452 + sp,
101453 +#endif
101454 + atomic_read_unchecked(&sp->sk_drops));
101455 }
101456
101457 int udp6_seq_show(struct seq_file *seq, void *v)
101458 diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
101459 index 48bb1e3..5980e6e 100644
101460 --- a/net/ipv6/xfrm6_tunnel.c
101461 +++ b/net/ipv6/xfrm6_tunnel.c
101462 @@ -258,7 +258,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
101463 __be32 spi;
101464
101465 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
101466 - return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
101467 + return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi);
101468 }
101469
101470 static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
101471 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
101472 index 811984d..11f59b7 100644
101473 --- a/net/irda/ircomm/ircomm_tty.c
101474 +++ b/net/irda/ircomm/ircomm_tty.c
101475 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
101476 add_wait_queue(&self->open_wait, &wait);
101477
101478 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
101479 - __FILE__,__LINE__, tty->driver->name, self->open_count );
101480 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
101481
101482 /* As far as I can see, we protect open_count - Jean II */
101483 spin_lock_irqsave(&self->spinlock, flags);
101484 if (!tty_hung_up_p(filp)) {
101485 extra_count = 1;
101486 - self->open_count--;
101487 + local_dec(&self->open_count);
101488 }
101489 spin_unlock_irqrestore(&self->spinlock, flags);
101490 - self->blocked_open++;
101491 + local_inc(&self->blocked_open);
101492
101493 while (1) {
101494 if (tty->termios->c_cflag & CBAUD) {
101495 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
101496 }
101497
101498 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
101499 - __FILE__,__LINE__, tty->driver->name, self->open_count );
101500 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
101501
101502 schedule();
101503 }
101504 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
101505 if (extra_count) {
101506 /* ++ is not atomic, so this should be protected - Jean II */
101507 spin_lock_irqsave(&self->spinlock, flags);
101508 - self->open_count++;
101509 + local_inc(&self->open_count);
101510 spin_unlock_irqrestore(&self->spinlock, flags);
101511 }
101512 - self->blocked_open--;
101513 + local_dec(&self->blocked_open);
101514
101515 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
101516 - __FILE__,__LINE__, tty->driver->name, self->open_count);
101517 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
101518
101519 if (!retval)
101520 self->flags |= ASYNC_NORMAL_ACTIVE;
101521 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
101522 }
101523 /* ++ is not atomic, so this should be protected - Jean II */
101524 spin_lock_irqsave(&self->spinlock, flags);
101525 - self->open_count++;
101526 + local_inc(&self->open_count);
101527
101528 tty->driver_data = self;
101529 self->tty = tty;
101530 spin_unlock_irqrestore(&self->spinlock, flags);
101531
101532 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
101533 - self->line, self->open_count);
101534 + self->line, local_read(&self->open_count));
101535
101536 /* Not really used by us, but lets do it anyway */
101537 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
101538 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
101539 return;
101540 }
101541
101542 - if ((tty->count == 1) && (self->open_count != 1)) {
101543 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
101544 /*
101545 * Uh, oh. tty->count is 1, which means that the tty
101546 * structure will be freed. state->count should always
101547 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
101548 */
101549 IRDA_DEBUG(0, "%s(), bad serial port count; "
101550 "tty->count is 1, state->count is %d\n", __func__ ,
101551 - self->open_count);
101552 - self->open_count = 1;
101553 + local_read(&self->open_count));
101554 + local_set(&self->open_count, 1);
101555 }
101556
101557 - if (--self->open_count < 0) {
101558 + if (local_dec_return(&self->open_count) < 0) {
101559 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
101560 - __func__, self->line, self->open_count);
101561 - self->open_count = 0;
101562 + __func__, self->line, local_read(&self->open_count));
101563 + local_set(&self->open_count, 0);
101564 }
101565 - if (self->open_count) {
101566 + if (local_read(&self->open_count)) {
101567 spin_unlock_irqrestore(&self->spinlock, flags);
101568
101569 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
101570 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
101571 tty->closing = 0;
101572 self->tty = NULL;
101573
101574 - if (self->blocked_open) {
101575 + if (local_read(&self->blocked_open)) {
101576 if (self->close_delay)
101577 schedule_timeout_interruptible(self->close_delay);
101578 wake_up_interruptible(&self->open_wait);
101579 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
101580 spin_lock_irqsave(&self->spinlock, flags);
101581 self->flags &= ~ASYNC_NORMAL_ACTIVE;
101582 self->tty = NULL;
101583 - self->open_count = 0;
101584 + local_set(&self->open_count, 0);
101585 spin_unlock_irqrestore(&self->spinlock, flags);
101586
101587 wake_up_interruptible(&self->open_wait);
101588 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
101589 seq_putc(m, '\n');
101590
101591 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
101592 - seq_printf(m, "Open count: %d\n", self->open_count);
101593 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
101594 seq_printf(m, "Max data size: %d\n", self->max_data_size);
101595 seq_printf(m, "Max header size: %d\n", self->max_header_size);
101596
101597 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
101598 index bada1b9..f325943 100644
101599 --- a/net/iucv/af_iucv.c
101600 +++ b/net/iucv/af_iucv.c
101601 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
101602
101603 write_lock_bh(&iucv_sk_list.lock);
101604
101605 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
101606 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
101607 while (__iucv_get_sock_by_name(name)) {
101608 sprintf(name, "%08x",
101609 - atomic_inc_return(&iucv_sk_list.autobind_name));
101610 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
101611 }
101612
101613 write_unlock_bh(&iucv_sk_list.lock);
101614 diff --git a/net/key/af_key.c b/net/key/af_key.c
101615 index 4e98193..439b449 100644
101616 --- a/net/key/af_key.c
101617 +++ b/net/key/af_key.c
101618 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
101619 struct xfrm_migrate m[XFRM_MAX_DEPTH];
101620 struct xfrm_kmaddress k;
101621
101622 + pax_track_stack();
101623 +
101624 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
101625 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
101626 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
101627 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
101628 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
101629 else
101630 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
101631 +#ifdef CONFIG_GRKERNSEC_HIDESYM
101632 + NULL,
101633 +#else
101634 s,
101635 +#endif
101636 atomic_read(&s->sk_refcnt),
101637 sk_rmem_alloc_get(s),
101638 sk_wmem_alloc_get(s),
101639 diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
101640 index bda96d1..c038b72 100644
101641 --- a/net/lapb/lapb_iface.c
101642 +++ b/net/lapb/lapb_iface.c
101643 @@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
101644 goto out;
101645
101646 lapb->dev = dev;
101647 - lapb->callbacks = *callbacks;
101648 + lapb->callbacks = callbacks;
101649
101650 __lapb_insert_cb(lapb);
101651
101652 @@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
101653
101654 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
101655 {
101656 - if (lapb->callbacks.connect_confirmation)
101657 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
101658 + if (lapb->callbacks->connect_confirmation)
101659 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
101660 }
101661
101662 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
101663 {
101664 - if (lapb->callbacks.connect_indication)
101665 - lapb->callbacks.connect_indication(lapb->dev, reason);
101666 + if (lapb->callbacks->connect_indication)
101667 + lapb->callbacks->connect_indication(lapb->dev, reason);
101668 }
101669
101670 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
101671 {
101672 - if (lapb->callbacks.disconnect_confirmation)
101673 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
101674 + if (lapb->callbacks->disconnect_confirmation)
101675 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
101676 }
101677
101678 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
101679 {
101680 - if (lapb->callbacks.disconnect_indication)
101681 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
101682 + if (lapb->callbacks->disconnect_indication)
101683 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
101684 }
101685
101686 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
101687 {
101688 - if (lapb->callbacks.data_indication)
101689 - return lapb->callbacks.data_indication(lapb->dev, skb);
101690 + if (lapb->callbacks->data_indication)
101691 + return lapb->callbacks->data_indication(lapb->dev, skb);
101692
101693 kfree_skb(skb);
101694 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
101695 @@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
101696 {
101697 int used = 0;
101698
101699 - if (lapb->callbacks.data_transmit) {
101700 - lapb->callbacks.data_transmit(lapb->dev, skb);
101701 + if (lapb->callbacks->data_transmit) {
101702 + lapb->callbacks->data_transmit(lapb->dev, skb);
101703 used = 1;
101704 }
101705
101706 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
101707 index fe2d3f8..e57f683 100644
101708 --- a/net/mac80211/cfg.c
101709 +++ b/net/mac80211/cfg.c
101710 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
101711 return err;
101712 }
101713
101714 -struct cfg80211_ops mac80211_config_ops = {
101715 +const struct cfg80211_ops mac80211_config_ops = {
101716 .add_virtual_intf = ieee80211_add_iface,
101717 .del_virtual_intf = ieee80211_del_iface,
101718 .change_virtual_intf = ieee80211_change_iface,
101719 diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
101720 index 7d7879f..2d51f62 100644
101721 --- a/net/mac80211/cfg.h
101722 +++ b/net/mac80211/cfg.h
101723 @@ -4,6 +4,6 @@
101724 #ifndef __CFG_H
101725 #define __CFG_H
101726
101727 -extern struct cfg80211_ops mac80211_config_ops;
101728 +extern const struct cfg80211_ops mac80211_config_ops;
101729
101730 #endif /* __CFG_H */
101731 diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
101732 index 99c7525..9cb4937 100644
101733 --- a/net/mac80211/debugfs_key.c
101734 +++ b/net/mac80211/debugfs_key.c
101735 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
101736 size_t count, loff_t *ppos)
101737 {
101738 struct ieee80211_key *key = file->private_data;
101739 - int i, res, bufsize = 2 * key->conf.keylen + 2;
101740 + int i, bufsize = 2 * key->conf.keylen + 2;
101741 char *buf = kmalloc(bufsize, GFP_KERNEL);
101742 char *p = buf;
101743 + ssize_t res;
101744 +
101745 + if (buf == NULL)
101746 + return -ENOMEM;
101747
101748 for (i = 0; i < key->conf.keylen; i++)
101749 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
101750 diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
101751 index 33a2e89..08650c8 100644
101752 --- a/net/mac80211/debugfs_sta.c
101753 +++ b/net/mac80211/debugfs_sta.c
101754 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
101755 int i;
101756 struct sta_info *sta = file->private_data;
101757
101758 + pax_track_stack();
101759 +
101760 spin_lock_bh(&sta->lock);
101761 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
101762 sta->ampdu_mlme.dialog_token_allocator + 1);
101763 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
101764 index ca62bfe..6657a03 100644
101765 --- a/net/mac80211/ieee80211_i.h
101766 +++ b/net/mac80211/ieee80211_i.h
101767 @@ -25,6 +25,7 @@
101768 #include <linux/etherdevice.h>
101769 #include <net/cfg80211.h>
101770 #include <net/mac80211.h>
101771 +#include <asm/local.h>
101772 #include "key.h"
101773 #include "sta_info.h"
101774
101775 @@ -635,7 +636,7 @@ struct ieee80211_local {
101776 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
101777 spinlock_t queue_stop_reason_lock;
101778
101779 - int open_count;
101780 + local_t open_count;
101781 int monitors, cooked_mntrs;
101782 /* number of interfaces with corresponding FIF_ flags */
101783 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
101784 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
101785 index 079c500..eb3c6d4 100644
101786 --- a/net/mac80211/iface.c
101787 +++ b/net/mac80211/iface.c
101788 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
101789 break;
101790 }
101791
101792 - if (local->open_count == 0) {
101793 + if (local_read(&local->open_count) == 0) {
101794 res = drv_start(local);
101795 if (res)
101796 goto err_del_bss;
101797 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
101798 * Validate the MAC address for this device.
101799 */
101800 if (!is_valid_ether_addr(dev->dev_addr)) {
101801 - if (!local->open_count)
101802 + if (!local_read(&local->open_count))
101803 drv_stop(local);
101804 return -EADDRNOTAVAIL;
101805 }
101806 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
101807
101808 hw_reconf_flags |= __ieee80211_recalc_idle(local);
101809
101810 - local->open_count++;
101811 + local_inc(&local->open_count);
101812 if (hw_reconf_flags) {
101813 ieee80211_hw_config(local, hw_reconf_flags);
101814 /*
101815 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
101816 err_del_interface:
101817 drv_remove_interface(local, &conf);
101818 err_stop:
101819 - if (!local->open_count)
101820 + if (!local_read(&local->open_count))
101821 drv_stop(local);
101822 err_del_bss:
101823 sdata->bss = NULL;
101824 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
101825 WARN_ON(!list_empty(&sdata->u.ap.vlans));
101826 }
101827
101828 - local->open_count--;
101829 + local_dec(&local->open_count);
101830
101831 switch (sdata->vif.type) {
101832 case NL80211_IFTYPE_AP_VLAN:
101833 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
101834
101835 ieee80211_recalc_ps(local, -1);
101836
101837 - if (local->open_count == 0) {
101838 + if (local_read(&local->open_count) == 0) {
101839 ieee80211_clear_tx_pending(local);
101840 ieee80211_stop_device(local);
101841
101842 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
101843 index 2dfe176..74e4388 100644
101844 --- a/net/mac80211/main.c
101845 +++ b/net/mac80211/main.c
101846 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
101847 local->hw.conf.power_level = power;
101848 }
101849
101850 - if (changed && local->open_count) {
101851 + if (changed && local_read(&local->open_count)) {
101852 ret = drv_config(local, changed);
101853 /*
101854 * Goal:
101855 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
101856 index e67eea7..fcc227e 100644
101857 --- a/net/mac80211/mlme.c
101858 +++ b/net/mac80211/mlme.c
101859 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
101860 bool have_higher_than_11mbit = false, newsta = false;
101861 u16 ap_ht_cap_flags;
101862
101863 + pax_track_stack();
101864 +
101865 /*
101866 * AssocResp and ReassocResp have identical structure, so process both
101867 * of them in this function.
101868 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
101869 index e535f1c..4d733d1 100644
101870 --- a/net/mac80211/pm.c
101871 +++ b/net/mac80211/pm.c
101872 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
101873 }
101874
101875 /* stop hardware - this must stop RX */
101876 - if (local->open_count)
101877 + if (local_read(&local->open_count))
101878 ieee80211_stop_device(local);
101879
101880 local->suspended = true;
101881 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
101882 index b33efc4..0a2efb6 100644
101883 --- a/net/mac80211/rate.c
101884 +++ b/net/mac80211/rate.c
101885 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
101886 struct rate_control_ref *ref, *old;
101887
101888 ASSERT_RTNL();
101889 - if (local->open_count)
101890 + if (local_read(&local->open_count))
101891 return -EBUSY;
101892
101893 ref = rate_control_alloc(name, local);
101894 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
101895 index b1d7904..57e4da7 100644
101896 --- a/net/mac80211/tx.c
101897 +++ b/net/mac80211/tx.c
101898 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
101899 return cpu_to_le16(dur);
101900 }
101901
101902 -static int inline is_ieee80211_device(struct ieee80211_local *local,
101903 +static inline int is_ieee80211_device(struct ieee80211_local *local,
101904 struct net_device *dev)
101905 {
101906 return local == wdev_priv(dev->ieee80211_ptr);
101907 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
101908 index 31b1085..48fb26d 100644
101909 --- a/net/mac80211/util.c
101910 +++ b/net/mac80211/util.c
101911 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
101912 local->resuming = true;
101913
101914 /* restart hardware */
101915 - if (local->open_count) {
101916 + if (local_read(&local->open_count)) {
101917 /*
101918 * Upon resume hardware can sometimes be goofy due to
101919 * various platform / driver / bus issues, so restarting
101920 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
101921 index 634d14a..b35a608 100644
101922 --- a/net/netfilter/Kconfig
101923 +++ b/net/netfilter/Kconfig
101924 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
101925
101926 To compile it as a module, choose M here. If unsure, say N.
101927
101928 +config NETFILTER_XT_MATCH_GRADM
101929 + tristate '"gradm" match support'
101930 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
101931 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
101932 + ---help---
101933 + The gradm match allows to match on grsecurity RBAC being enabled.
101934 + It is useful when iptables rules are applied early on bootup to
101935 + prevent connections to the machine (except from a trusted host)
101936 + while the RBAC system is disabled.
101937 +
101938 config NETFILTER_XT_MATCH_HASHLIMIT
101939 tristate '"hashlimit" match support'
101940 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
101941 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
101942 index 49f62ee..a17b2c6 100644
101943 --- a/net/netfilter/Makefile
101944 +++ b/net/netfilter/Makefile
101945 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
101946 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
101947 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
101948 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
101949 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
101950 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
101951 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
101952 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
101953 diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
101954 index 3c7e427..724043c 100644
101955 --- a/net/netfilter/ipvs/ip_vs_app.c
101956 +++ b/net/netfilter/ipvs/ip_vs_app.c
101957 @@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
101958 .open = ip_vs_app_open,
101959 .read = seq_read,
101960 .llseek = seq_lseek,
101961 - .release = seq_release,
101962 + .release = seq_release_net,
101963 };
101964 #endif
101965
101966 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
101967 index 95682e5..457dbac 100644
101968 --- a/net/netfilter/ipvs/ip_vs_conn.c
101969 +++ b/net/netfilter/ipvs/ip_vs_conn.c
101970 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
101971 /* if the connection is not template and is created
101972 * by sync, preserve the activity flag.
101973 */
101974 - cp->flags |= atomic_read(&dest->conn_flags) &
101975 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
101976 (~IP_VS_CONN_F_INACTIVE);
101977 else
101978 - cp->flags |= atomic_read(&dest->conn_flags);
101979 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
101980 cp->dest = dest;
101981
101982 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
101983 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
101984 atomic_set(&cp->refcnt, 1);
101985
101986 atomic_set(&cp->n_control, 0);
101987 - atomic_set(&cp->in_pkts, 0);
101988 + atomic_set_unchecked(&cp->in_pkts, 0);
101989
101990 atomic_inc(&ip_vs_conn_count);
101991 if (flags & IP_VS_CONN_F_NO_CPORT)
101992 @@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
101993 .open = ip_vs_conn_open,
101994 .read = seq_read,
101995 .llseek = seq_lseek,
101996 - .release = seq_release,
101997 + .release = seq_release_net,
101998 };
101999
102000 static const char *ip_vs_origin_name(unsigned flags)
102001 @@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
102002 .open = ip_vs_conn_sync_open,
102003 .read = seq_read,
102004 .llseek = seq_lseek,
102005 - .release = seq_release,
102006 + .release = seq_release_net,
102007 };
102008
102009 #endif
102010 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
102011
102012 /* Don't drop the entry if its number of incoming packets is not
102013 located in [0, 8] */
102014 - i = atomic_read(&cp->in_pkts);
102015 + i = atomic_read_unchecked(&cp->in_pkts);
102016 if (i > 8 || i < 0) return 0;
102017
102018 if (!todrop_rate[i]) return 0;
102019 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
102020 index b95699f..5fee919 100644
102021 --- a/net/netfilter/ipvs/ip_vs_core.c
102022 +++ b/net/netfilter/ipvs/ip_vs_core.c
102023 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
102024 ret = cp->packet_xmit(skb, cp, pp);
102025 /* do not touch skb anymore */
102026
102027 - atomic_inc(&cp->in_pkts);
102028 + atomic_inc_unchecked(&cp->in_pkts);
102029 ip_vs_conn_put(cp);
102030 return ret;
102031 }
102032 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
102033 * Sync connection if it is about to close to
102034 * encorage the standby servers to update the connections timeout
102035 */
102036 - pkts = atomic_add_return(1, &cp->in_pkts);
102037 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
102038 if (af == AF_INET &&
102039 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
102040 (((cp->protocol != IPPROTO_TCP ||
102041 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
102042 index 02b2610..2d89424 100644
102043 --- a/net/netfilter/ipvs/ip_vs_ctl.c
102044 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
102045 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
102046 ip_vs_rs_hash(dest);
102047 write_unlock_bh(&__ip_vs_rs_lock);
102048 }
102049 - atomic_set(&dest->conn_flags, conn_flags);
102050 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
102051
102052 /* bind the service */
102053 if (!dest->svc) {
102054 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
102055 " %-7s %-6d %-10d %-10d\n",
102056 &dest->addr.in6,
102057 ntohs(dest->port),
102058 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
102059 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
102060 atomic_read(&dest->weight),
102061 atomic_read(&dest->activeconns),
102062 atomic_read(&dest->inactconns));
102063 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
102064 "%-7s %-6d %-10d %-10d\n",
102065 ntohl(dest->addr.ip),
102066 ntohs(dest->port),
102067 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
102068 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
102069 atomic_read(&dest->weight),
102070 atomic_read(&dest->activeconns),
102071 atomic_read(&dest->inactconns));
102072 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
102073 .open = ip_vs_info_open,
102074 .read = seq_read,
102075 .llseek = seq_lseek,
102076 - .release = seq_release_private,
102077 + .release = seq_release_net,
102078 };
102079
102080 #endif
102081 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
102082 .open = ip_vs_stats_seq_open,
102083 .read = seq_read,
102084 .llseek = seq_lseek,
102085 - .release = single_release,
102086 + .release = single_release_net,
102087 };
102088
102089 #endif
102090 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
102091
102092 entry.addr = dest->addr.ip;
102093 entry.port = dest->port;
102094 - entry.conn_flags = atomic_read(&dest->conn_flags);
102095 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
102096 entry.weight = atomic_read(&dest->weight);
102097 entry.u_threshold = dest->u_threshold;
102098 entry.l_threshold = dest->l_threshold;
102099 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102100 unsigned char arg[128];
102101 int ret = 0;
102102
102103 + pax_track_stack();
102104 +
102105 if (!capable(CAP_NET_ADMIN))
102106 return -EPERM;
102107
102108 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
102109 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
102110
102111 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
102112 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
102113 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
102114 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
102115 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
102116 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
102117 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
102118 index e177f0d..55e8581 100644
102119 --- a/net/netfilter/ipvs/ip_vs_sync.c
102120 +++ b/net/netfilter/ipvs/ip_vs_sync.c
102121 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
102122
102123 if (opt)
102124 memcpy(&cp->in_seq, opt, sizeof(*opt));
102125 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
102126 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
102127 cp->state = state;
102128 cp->old_state = cp->state;
102129 /*
102130 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
102131 index 30b3189..e2e4b55 100644
102132 --- a/net/netfilter/ipvs/ip_vs_xmit.c
102133 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
102134 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
102135 else
102136 rc = NF_ACCEPT;
102137 /* do not touch skb anymore */
102138 - atomic_inc(&cp->in_pkts);
102139 + atomic_inc_unchecked(&cp->in_pkts);
102140 goto out;
102141 }
102142
102143 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
102144 else
102145 rc = NF_ACCEPT;
102146 /* do not touch skb anymore */
102147 - atomic_inc(&cp->in_pkts);
102148 + atomic_inc_unchecked(&cp->in_pkts);
102149 goto out;
102150 }
102151
102152 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
102153 index d521718..d0fd7a1 100644
102154 --- a/net/netfilter/nf_conntrack_netlink.c
102155 +++ b/net/netfilter/nf_conntrack_netlink.c
102156 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
102157 static int
102158 ctnetlink_parse_tuple(const struct nlattr * const cda[],
102159 struct nf_conntrack_tuple *tuple,
102160 - enum ctattr_tuple type, u_int8_t l3num)
102161 + enum ctattr_type type, u_int8_t l3num)
102162 {
102163 struct nlattr *tb[CTA_TUPLE_MAX+1];
102164 int err;
102165 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
102166 index f900dc3..5e45346 100644
102167 --- a/net/netfilter/nfnetlink_log.c
102168 +++ b/net/netfilter/nfnetlink_log.c
102169 @@ -68,7 +68,7 @@ struct nfulnl_instance {
102170 };
102171
102172 static DEFINE_RWLOCK(instances_lock);
102173 -static atomic_t global_seq;
102174 +static atomic_unchecked_t global_seq;
102175
102176 #define INSTANCE_BUCKETS 16
102177 static struct hlist_head instance_table[INSTANCE_BUCKETS];
102178 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
102179 /* global sequence number */
102180 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
102181 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
102182 - htonl(atomic_inc_return(&global_seq)));
102183 + htonl(atomic_inc_return_unchecked(&global_seq)));
102184
102185 if (data_len) {
102186 struct nlattr *nla;
102187 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
102188 new file mode 100644
102189 index 0000000..b1bac76
102190 --- /dev/null
102191 +++ b/net/netfilter/xt_gradm.c
102192 @@ -0,0 +1,51 @@
102193 +/*
102194 + * gradm match for netfilter
102195 + * Copyright © Zbigniew Krzystolik, 2010
102196 + *
102197 + * This program is free software; you can redistribute it and/or modify
102198 + * it under the terms of the GNU General Public License; either version
102199 + * 2 or 3 as published by the Free Software Foundation.
102200 + */
102201 +#include <linux/module.h>
102202 +#include <linux/moduleparam.h>
102203 +#include <linux/skbuff.h>
102204 +#include <linux/netfilter/x_tables.h>
102205 +#include <linux/grsecurity.h>
102206 +#include <linux/netfilter/xt_gradm.h>
102207 +
102208 +static bool
102209 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
102210 +{
102211 + const struct xt_gradm_mtinfo *info = par->matchinfo;
102212 + bool retval = false;
102213 + if (gr_acl_is_enabled())
102214 + retval = true;
102215 + return retval ^ info->invflags;
102216 +}
102217 +
102218 +static struct xt_match gradm_mt_reg __read_mostly = {
102219 + .name = "gradm",
102220 + .revision = 0,
102221 + .family = NFPROTO_UNSPEC,
102222 + .match = gradm_mt,
102223 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
102224 + .me = THIS_MODULE,
102225 +};
102226 +
102227 +static int __init gradm_mt_init(void)
102228 +{
102229 + return xt_register_match(&gradm_mt_reg);
102230 +}
102231 +
102232 +static void __exit gradm_mt_exit(void)
102233 +{
102234 + xt_unregister_match(&gradm_mt_reg);
102235 +}
102236 +
102237 +module_init(gradm_mt_init);
102238 +module_exit(gradm_mt_exit);
102239 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
102240 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
102241 +MODULE_LICENSE("GPL");
102242 +MODULE_ALIAS("ipt_gradm");
102243 +MODULE_ALIAS("ip6t_gradm");
102244 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
102245 index 5a7dcdf..24a3578 100644
102246 --- a/net/netlink/af_netlink.c
102247 +++ b/net/netlink/af_netlink.c
102248 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock *sk)
102249 sk->sk_error_report(sk);
102250 }
102251 }
102252 - atomic_inc(&sk->sk_drops);
102253 + atomic_inc_unchecked(&sk->sk_drops);
102254 }
102255
102256 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
102257 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
102258 struct netlink_sock *nlk = nlk_sk(s);
102259
102260 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
102261 +#ifdef CONFIG_GRKERNSEC_HIDESYM
102262 + NULL,
102263 +#else
102264 s,
102265 +#endif
102266 s->sk_protocol,
102267 nlk->pid,
102268 nlk->groups ? (u32)nlk->groups[0] : 0,
102269 sk_rmem_alloc_get(s),
102270 sk_wmem_alloc_get(s),
102271 +#ifdef CONFIG_GRKERNSEC_HIDESYM
102272 + NULL,
102273 +#else
102274 nlk->cb,
102275 +#endif
102276 atomic_read(&s->sk_refcnt),
102277 - atomic_read(&s->sk_drops)
102278 + atomic_read_unchecked(&s->sk_drops)
102279 );
102280
102281 }
102282 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
102283 index 7a83495..ab0062f 100644
102284 --- a/net/netrom/af_netrom.c
102285 +++ b/net/netrom/af_netrom.c
102286 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
102287 struct sock *sk = sock->sk;
102288 struct nr_sock *nr = nr_sk(sk);
102289
102290 + memset(sax, 0, sizeof(*sax));
102291 lock_sock(sk);
102292 if (peer != 0) {
102293 if (sk->sk_state != TCP_ESTABLISHED) {
102294 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
102295 *uaddr_len = sizeof(struct full_sockaddr_ax25);
102296 } else {
102297 sax->fsa_ax25.sax25_family = AF_NETROM;
102298 - sax->fsa_ax25.sax25_ndigis = 0;
102299 sax->fsa_ax25.sax25_call = nr->source_addr;
102300 *uaddr_len = sizeof(struct sockaddr_ax25);
102301 }
102302 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
102303 index 35cfa79..4e78ff7 100644
102304 --- a/net/packet/af_packet.c
102305 +++ b/net/packet/af_packet.c
102306 @@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_file *seq, void *v)
102307
102308 seq_printf(seq,
102309 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
102310 +#ifdef CONFIG_GRKERNSEC_HIDESYM
102311 + NULL,
102312 +#else
102313 s,
102314 +#endif
102315 atomic_read(&s->sk_refcnt),
102316 s->sk_type,
102317 ntohs(po->num),
102318 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
102319 index 519ff9d..a422a90 100644
102320 --- a/net/phonet/af_phonet.c
102321 +++ b/net/phonet/af_phonet.c
102322 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(int protocol)
102323 {
102324 struct phonet_protocol *pp;
102325
102326 - if (protocol >= PHONET_NPROTO)
102327 + if (protocol < 0 || protocol >= PHONET_NPROTO)
102328 return NULL;
102329
102330 spin_lock(&proto_tab_lock);
102331 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_register(int protocol,
102332 {
102333 int err = 0;
102334
102335 - if (protocol >= PHONET_NPROTO)
102336 + if (protocol < 0 || protocol >= PHONET_NPROTO)
102337 return -EINVAL;
102338
102339 err = proto_register(pp->prot, 1);
102340 diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
102341 index ef5c75c..2b6c2fa 100644
102342 --- a/net/phonet/datagram.c
102343 +++ b/net/phonet/datagram.c
102344 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
102345 if (err < 0) {
102346 kfree_skb(skb);
102347 if (err == -ENOMEM)
102348 - atomic_inc(&sk->sk_drops);
102349 + atomic_inc_unchecked(&sk->sk_drops);
102350 }
102351 return err ? NET_RX_DROP : NET_RX_SUCCESS;
102352 }
102353 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
102354 index 9cdd35e..16cd850 100644
102355 --- a/net/phonet/pep.c
102356 +++ b/net/phonet/pep.c
102357 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
102358
102359 case PNS_PEP_CTRL_REQ:
102360 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
102361 - atomic_inc(&sk->sk_drops);
102362 + atomic_inc_unchecked(&sk->sk_drops);
102363 break;
102364 }
102365 __skb_pull(skb, 4);
102366 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
102367 if (!err)
102368 return 0;
102369 if (err == -ENOMEM)
102370 - atomic_inc(&sk->sk_drops);
102371 + atomic_inc_unchecked(&sk->sk_drops);
102372 break;
102373 }
102374
102375 if (pn->rx_credits == 0) {
102376 - atomic_inc(&sk->sk_drops);
102377 + atomic_inc_unchecked(&sk->sk_drops);
102378 err = -ENOBUFS;
102379 break;
102380 }
102381 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
102382 index aa5b5a9..c09b4f8 100644
102383 --- a/net/phonet/socket.c
102384 +++ b/net/phonet/socket.c
102385 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
102386 sk->sk_state,
102387 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
102388 sock_i_uid(sk), sock_i_ino(sk),
102389 - atomic_read(&sk->sk_refcnt), sk,
102390 - atomic_read(&sk->sk_drops), &len);
102391 + atomic_read(&sk->sk_refcnt),
102392 +#ifdef CONFIG_GRKERNSEC_HIDESYM
102393 + NULL,
102394 +#else
102395 + sk,
102396 +#endif
102397 + atomic_read_unchecked(&sk->sk_drops), &len);
102398 }
102399 seq_printf(seq, "%*s\n", 127 - len, "");
102400 return 0;
102401 diff --git a/net/rds/Kconfig b/net/rds/Kconfig
102402 index ec753b3..821187c 100644
102403 --- a/net/rds/Kconfig
102404 +++ b/net/rds/Kconfig
102405 @@ -1,7 +1,7 @@
102406
102407 config RDS
102408 tristate "The RDS Protocol (EXPERIMENTAL)"
102409 - depends on INET && EXPERIMENTAL
102410 + depends on INET && EXPERIMENTAL && BROKEN
102411 ---help---
102412 The RDS (Reliable Datagram Sockets) protocol provides reliable,
102413 sequenced delivery of datagrams over Infiniband, iWARP,
102414 diff --git a/net/rds/cong.c b/net/rds/cong.c
102415 index dd2711d..1c7ed12 100644
102416 --- a/net/rds/cong.c
102417 +++ b/net/rds/cong.c
102418 @@ -77,7 +77,7 @@
102419 * finds that the saved generation number is smaller than the global generation
102420 * number, it wakes up the process.
102421 */
102422 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
102423 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
102424
102425 /*
102426 * Congestion monitoring
102427 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
102428 rdsdebug("waking map %p for %pI4\n",
102429 map, &map->m_addr);
102430 rds_stats_inc(s_cong_update_received);
102431 - atomic_inc(&rds_cong_generation);
102432 + atomic_inc_unchecked(&rds_cong_generation);
102433 if (waitqueue_active(&map->m_waitq))
102434 wake_up(&map->m_waitq);
102435 if (waitqueue_active(&rds_poll_waitq))
102436 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
102437
102438 int rds_cong_updated_since(unsigned long *recent)
102439 {
102440 - unsigned long gen = atomic_read(&rds_cong_generation);
102441 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
102442
102443 if (likely(*recent == gen))
102444 return 0;
102445 diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
102446 index de4a1b1..94ec861 100644
102447 --- a/net/rds/iw_rdma.c
102448 +++ b/net/rds/iw_rdma.c
102449 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
102450 struct rdma_cm_id *pcm_id;
102451 int rc;
102452
102453 + pax_track_stack();
102454 +
102455 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
102456 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
102457
102458 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
102459 index b5198ae..8b9fb90 100644
102460 --- a/net/rds/tcp.c
102461 +++ b/net/rds/tcp.c
102462 @@ -57,7 +57,7 @@ void rds_tcp_nonagle(struct socket *sock)
102463 int val = 1;
102464
102465 set_fs(KERNEL_DS);
102466 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
102467 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
102468 sizeof(val));
102469 set_fs(oldfs);
102470 }
102471 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
102472 index ab545e0..4079b3b 100644
102473 --- a/net/rds/tcp_send.c
102474 +++ b/net/rds/tcp_send.c
102475 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
102476
102477 oldfs = get_fs();
102478 set_fs(KERNEL_DS);
102479 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
102480 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
102481 sizeof(val));
102482 set_fs(oldfs);
102483 }
102484 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
102485 index a86afce..8657bce 100644
102486 --- a/net/rxrpc/af_rxrpc.c
102487 +++ b/net/rxrpc/af_rxrpc.c
102488 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_ops;
102489 __be32 rxrpc_epoch;
102490
102491 /* current debugging ID */
102492 -atomic_t rxrpc_debug_id;
102493 +atomic_unchecked_t rxrpc_debug_id;
102494
102495 /* count of skbs currently in use */
102496 atomic_t rxrpc_n_skbs;
102497 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
102498 index b4a2209..539106c 100644
102499 --- a/net/rxrpc/ar-ack.c
102500 +++ b/net/rxrpc/ar-ack.c
102501 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
102502
102503 _enter("{%d,%d,%d,%d},",
102504 call->acks_hard, call->acks_unacked,
102505 - atomic_read(&call->sequence),
102506 + atomic_read_unchecked(&call->sequence),
102507 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
102508
102509 stop = 0;
102510 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
102511
102512 /* each Tx packet has a new serial number */
102513 sp->hdr.serial =
102514 - htonl(atomic_inc_return(&call->conn->serial));
102515 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
102516
102517 hdr = (struct rxrpc_header *) txb->head;
102518 hdr->serial = sp->hdr.serial;
102519 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
102520 */
102521 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
102522 {
102523 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
102524 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
102525 }
102526
102527 /*
102528 @@ -627,7 +627,7 @@ process_further:
102529
102530 latest = ntohl(sp->hdr.serial);
102531 hard = ntohl(ack.firstPacket);
102532 - tx = atomic_read(&call->sequence);
102533 + tx = atomic_read_unchecked(&call->sequence);
102534
102535 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
102536 latest,
102537 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_struct *work)
102538 u32 abort_code = RX_PROTOCOL_ERROR;
102539 u8 *acks = NULL;
102540
102541 + pax_track_stack();
102542 +
102543 //printk("\n--------------------\n");
102544 _enter("{%d,%s,%lx} [%lu]",
102545 call->debug_id, rxrpc_call_states[call->state], call->events,
102546 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
102547 goto maybe_reschedule;
102548
102549 send_ACK_with_skew:
102550 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
102551 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
102552 ntohl(ack.serial));
102553 send_ACK:
102554 mtu = call->conn->trans->peer->if_mtu;
102555 @@ -1171,7 +1173,7 @@ send_ACK:
102556 ackinfo.rxMTU = htonl(5692);
102557 ackinfo.jumbo_max = htonl(4);
102558
102559 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
102560 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
102561 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
102562 ntohl(hdr.serial),
102563 ntohs(ack.maxSkew),
102564 @@ -1189,7 +1191,7 @@ send_ACK:
102565 send_message:
102566 _debug("send message");
102567
102568 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
102569 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
102570 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
102571 send_message_2:
102572
102573 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
102574 index bc0019f..e1b4b24 100644
102575 --- a/net/rxrpc/ar-call.c
102576 +++ b/net/rxrpc/ar-call.c
102577 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
102578 spin_lock_init(&call->lock);
102579 rwlock_init(&call->state_lock);
102580 atomic_set(&call->usage, 1);
102581 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
102582 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
102583 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
102584
102585 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
102586 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
102587 index 9f1ce84..ff8d061 100644
102588 --- a/net/rxrpc/ar-connection.c
102589 +++ b/net/rxrpc/ar-connection.c
102590 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
102591 rwlock_init(&conn->lock);
102592 spin_lock_init(&conn->state_lock);
102593 atomic_set(&conn->usage, 1);
102594 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
102595 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
102596 conn->avail_calls = RXRPC_MAXCALLS;
102597 conn->size_align = 4;
102598 conn->header_size = sizeof(struct rxrpc_header);
102599 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
102600 index 0505cdc..f0748ce 100644
102601 --- a/net/rxrpc/ar-connevent.c
102602 +++ b/net/rxrpc/ar-connevent.c
102603 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
102604
102605 len = iov[0].iov_len + iov[1].iov_len;
102606
102607 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
102608 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
102609 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
102610
102611 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
102612 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
102613 index f98c802..9e8488e 100644
102614 --- a/net/rxrpc/ar-input.c
102615 +++ b/net/rxrpc/ar-input.c
102616 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
102617 /* track the latest serial number on this connection for ACK packet
102618 * information */
102619 serial = ntohl(sp->hdr.serial);
102620 - hi_serial = atomic_read(&call->conn->hi_serial);
102621 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
102622 while (serial > hi_serial)
102623 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
102624 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
102625 serial);
102626
102627 /* request ACK generation for any ACK or DATA packet that requests
102628 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
102629 index 7043b29..06edcdf 100644
102630 --- a/net/rxrpc/ar-internal.h
102631 +++ b/net/rxrpc/ar-internal.h
102632 @@ -272,8 +272,8 @@ struct rxrpc_connection {
102633 int error; /* error code for local abort */
102634 int debug_id; /* debug ID for printks */
102635 unsigned call_counter; /* call ID counter */
102636 - atomic_t serial; /* packet serial number counter */
102637 - atomic_t hi_serial; /* highest serial number received */
102638 + atomic_unchecked_t serial; /* packet serial number counter */
102639 + atomic_unchecked_t hi_serial; /* highest serial number received */
102640 u8 avail_calls; /* number of calls available */
102641 u8 size_align; /* data size alignment (for security) */
102642 u8 header_size; /* rxrpc + security header size */
102643 @@ -346,7 +346,7 @@ struct rxrpc_call {
102644 spinlock_t lock;
102645 rwlock_t state_lock; /* lock for state transition */
102646 atomic_t usage;
102647 - atomic_t sequence; /* Tx data packet sequence counter */
102648 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
102649 u32 abort_code; /* local/remote abort code */
102650 enum { /* current state of call */
102651 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
102652 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
102653 */
102654 extern atomic_t rxrpc_n_skbs;
102655 extern __be32 rxrpc_epoch;
102656 -extern atomic_t rxrpc_debug_id;
102657 +extern atomic_unchecked_t rxrpc_debug_id;
102658 extern struct workqueue_struct *rxrpc_workqueue;
102659
102660 /*
102661 diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
102662 index 74697b2..10f9b77 100644
102663 --- a/net/rxrpc/ar-key.c
102664 +++ b/net/rxrpc/ar-key.c
102665 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
102666 return ret;
102667
102668 plen -= sizeof(*token);
102669 - token = kmalloc(sizeof(*token), GFP_KERNEL);
102670 + token = kzalloc(sizeof(*token), GFP_KERNEL);
102671 if (!token)
102672 return -ENOMEM;
102673
102674 - token->kad = kmalloc(plen, GFP_KERNEL);
102675 + token->kad = kzalloc(plen, GFP_KERNEL);
102676 if (!token->kad) {
102677 kfree(token);
102678 return -ENOMEM;
102679 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
102680 goto error;
102681
102682 ret = -ENOMEM;
102683 - token = kmalloc(sizeof(*token), GFP_KERNEL);
102684 + token = kzalloc(sizeof(*token), GFP_KERNEL);
102685 if (!token)
102686 goto error;
102687 - token->kad = kmalloc(plen, GFP_KERNEL);
102688 + token->kad = kzalloc(plen, GFP_KERNEL);
102689 if (!token->kad)
102690 goto error_free;
102691
102692 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
102693 index 807535f..5b7f19e 100644
102694 --- a/net/rxrpc/ar-local.c
102695 +++ b/net/rxrpc/ar-local.c
102696 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
102697 spin_lock_init(&local->lock);
102698 rwlock_init(&local->services_lock);
102699 atomic_set(&local->usage, 1);
102700 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
102701 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
102702 memcpy(&local->srx, srx, sizeof(*srx));
102703 }
102704
102705 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
102706 index cc9102c..7d3888e 100644
102707 --- a/net/rxrpc/ar-output.c
102708 +++ b/net/rxrpc/ar-output.c
102709 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
102710 sp->hdr.cid = call->cid;
102711 sp->hdr.callNumber = call->call_id;
102712 sp->hdr.seq =
102713 - htonl(atomic_inc_return(&call->sequence));
102714 + htonl(atomic_inc_return_unchecked(&call->sequence));
102715 sp->hdr.serial =
102716 - htonl(atomic_inc_return(&conn->serial));
102717 + htonl(atomic_inc_return_unchecked(&conn->serial));
102718 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
102719 sp->hdr.userStatus = 0;
102720 sp->hdr.securityIndex = conn->security_ix;
102721 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
102722 index edc026c..4bd4e2d 100644
102723 --- a/net/rxrpc/ar-peer.c
102724 +++ b/net/rxrpc/ar-peer.c
102725 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
102726 INIT_LIST_HEAD(&peer->error_targets);
102727 spin_lock_init(&peer->lock);
102728 atomic_set(&peer->usage, 1);
102729 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
102730 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
102731 memcpy(&peer->srx, srx, sizeof(*srx));
102732
102733 rxrpc_assess_MTU_size(peer);
102734 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
102735 index 38047f7..9f48511 100644
102736 --- a/net/rxrpc/ar-proc.c
102737 +++ b/net/rxrpc/ar-proc.c
102738 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
102739 atomic_read(&conn->usage),
102740 rxrpc_conn_states[conn->state],
102741 key_serial(conn->key),
102742 - atomic_read(&conn->serial),
102743 - atomic_read(&conn->hi_serial));
102744 + atomic_read_unchecked(&conn->serial),
102745 + atomic_read_unchecked(&conn->hi_serial));
102746
102747 return 0;
102748 }
102749 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
102750 index 0936e1a..437c640 100644
102751 --- a/net/rxrpc/ar-transport.c
102752 +++ b/net/rxrpc/ar-transport.c
102753 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
102754 spin_lock_init(&trans->client_lock);
102755 rwlock_init(&trans->conn_lock);
102756 atomic_set(&trans->usage, 1);
102757 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
102758 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
102759
102760 if (peer->srx.transport.family == AF_INET) {
102761 switch (peer->srx.transport_type) {
102762 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
102763 index 713ac59..306f6ae 100644
102764 --- a/net/rxrpc/rxkad.c
102765 +++ b/net/rxrpc/rxkad.c
102766 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
102767 u16 check;
102768 int nsg;
102769
102770 + pax_track_stack();
102771 +
102772 sp = rxrpc_skb(skb);
102773
102774 _enter("");
102775 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
102776 u16 check;
102777 int nsg;
102778
102779 + pax_track_stack();
102780 +
102781 _enter("");
102782
102783 sp = rxrpc_skb(skb);
102784 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
102785
102786 len = iov[0].iov_len + iov[1].iov_len;
102787
102788 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
102789 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
102790 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
102791
102792 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
102793 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
102794
102795 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
102796
102797 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
102798 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
102799 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
102800
102801 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
102802 diff --git a/net/sctp/auth.c b/net/sctp/auth.c
102803 index 914c419..7a16d2c 100644
102804 --- a/net/sctp/auth.c
102805 +++ b/net/sctp/auth.c
102806 @@ -81,7 +81,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
102807 struct sctp_auth_bytes *key;
102808
102809 /* Verify that we are not going to overflow INT_MAX */
102810 - if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
102811 + if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
102812 return NULL;
102813
102814 /* Allocate the shared key */
102815 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
102816 index d093cbf..9fc36fc 100644
102817 --- a/net/sctp/proc.c
102818 +++ b/net/sctp/proc.c
102819 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
102820 sctp_for_each_hentry(epb, node, &head->chain) {
102821 ep = sctp_ep(epb);
102822 sk = epb->sk;
102823 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
102824 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
102825 +#ifdef CONFIG_GRKERNSEC_HIDESYM
102826 + NULL, NULL,
102827 +#else
102828 + ep, sk,
102829 +#endif
102830 sctp_sk(sk)->type, sk->sk_state, hash,
102831 epb->bind_addr.port,
102832 sock_i_uid(sk), sock_i_ino(sk));
102833 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
102834 seq_printf(seq,
102835 "%8p %8p %-3d %-3d %-2d %-4d "
102836 "%4d %8d %8d %7d %5lu %-5d %5d ",
102837 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
102838 +#ifdef CONFIG_GRKERNSEC_HIDESYM
102839 + NULL, NULL,
102840 +#else
102841 + assoc, sk,
102842 +#endif
102843 + sctp_sk(sk)->type, sk->sk_state,
102844 assoc->state, hash,
102845 assoc->assoc_id,
102846 assoc->sndbuf_used,
102847 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
102848 index 3a95fcb..c40fc1d 100644
102849 --- a/net/sctp/socket.c
102850 +++ b/net/sctp/socket.c
102851 @@ -5802,7 +5802,6 @@ pp_found:
102852 */
102853 int reuse = sk->sk_reuse;
102854 struct sock *sk2;
102855 - struct hlist_node *node;
102856
102857 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
102858 if (pp->fastreuse && sk->sk_reuse &&
102859 diff --git a/net/socket.c b/net/socket.c
102860 index d449812..4ac08d3c 100644
102861 --- a/net/socket.c
102862 +++ b/net/socket.c
102863 @@ -87,6 +87,7 @@
102864 #include <linux/wireless.h>
102865 #include <linux/nsproxy.h>
102866 #include <linux/magic.h>
102867 +#include <linux/in.h>
102868
102869 #include <asm/uaccess.h>
102870 #include <asm/unistd.h>
102871 @@ -97,6 +98,21 @@
102872 #include <net/sock.h>
102873 #include <linux/netfilter.h>
102874
102875 +extern void gr_attach_curr_ip(const struct sock *sk);
102876 +extern int gr_handle_sock_all(const int family, const int type,
102877 + const int protocol);
102878 +extern int gr_handle_sock_server(const struct sockaddr *sck);
102879 +extern int gr_handle_sock_server_other(const struct sock *sck);
102880 +extern int gr_handle_sock_client(const struct sockaddr *sck);
102881 +extern int gr_search_connect(struct socket * sock,
102882 + struct sockaddr_in * addr);
102883 +extern int gr_search_bind(struct socket * sock,
102884 + struct sockaddr_in * addr);
102885 +extern int gr_search_listen(struct socket * sock);
102886 +extern int gr_search_accept(struct socket * sock);
102887 +extern int gr_search_socket(const int domain, const int type,
102888 + const int protocol);
102889 +
102890 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
102891 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
102892 unsigned long nr_segs, loff_t pos);
102893 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_system_type *fs_type,
102894 mnt);
102895 }
102896
102897 -static struct vfsmount *sock_mnt __read_mostly;
102898 +struct vfsmount *sock_mnt __read_mostly;
102899
102900 static struct file_system_type sock_fs_type = {
102901 .name = "sockfs",
102902 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net, int family, int type, int protocol,
102903 return -EAFNOSUPPORT;
102904 if (type < 0 || type >= SOCK_MAX)
102905 return -EINVAL;
102906 + if (protocol < 0)
102907 + return -EINVAL;
102908
102909 /* Compatibility.
102910
102911 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
102912 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
102913 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
102914
102915 + if(!gr_search_socket(family, type, protocol)) {
102916 + retval = -EACCES;
102917 + goto out;
102918 + }
102919 +
102920 + if (gr_handle_sock_all(family, type, protocol)) {
102921 + retval = -EACCES;
102922 + goto out;
102923 + }
102924 +
102925 retval = sock_create(family, type, protocol, &sock);
102926 if (retval < 0)
102927 goto out;
102928 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
102929 if (sock) {
102930 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
102931 if (err >= 0) {
102932 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
102933 + err = -EACCES;
102934 + goto error;
102935 + }
102936 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
102937 + if (err)
102938 + goto error;
102939 +
102940 err = security_socket_bind(sock,
102941 (struct sockaddr *)&address,
102942 addrlen);
102943 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
102944 (struct sockaddr *)
102945 &address, addrlen);
102946 }
102947 +error:
102948 fput_light(sock->file, fput_needed);
102949 }
102950 return err;
102951 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
102952 if ((unsigned)backlog > somaxconn)
102953 backlog = somaxconn;
102954
102955 + if (gr_handle_sock_server_other(sock->sk)) {
102956 + err = -EPERM;
102957 + goto error;
102958 + }
102959 +
102960 + err = gr_search_listen(sock);
102961 + if (err)
102962 + goto error;
102963 +
102964 err = security_socket_listen(sock, backlog);
102965 if (!err)
102966 err = sock->ops->listen(sock, backlog);
102967
102968 +error:
102969 fput_light(sock->file, fput_needed);
102970 }
102971 return err;
102972 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
102973 newsock->type = sock->type;
102974 newsock->ops = sock->ops;
102975
102976 + if (gr_handle_sock_server_other(sock->sk)) {
102977 + err = -EPERM;
102978 + sock_release(newsock);
102979 + goto out_put;
102980 + }
102981 +
102982 + err = gr_search_accept(sock);
102983 + if (err) {
102984 + sock_release(newsock);
102985 + goto out_put;
102986 + }
102987 +
102988 /*
102989 * We don't need try_module_get here, as the listening socket (sock)
102990 * has the protocol module (sock->ops->owner) held.
102991 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
102992 fd_install(newfd, newfile);
102993 err = newfd;
102994
102995 + gr_attach_curr_ip(newsock->sk);
102996 +
102997 out_put:
102998 fput_light(sock->file, fput_needed);
102999 out:
103000 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
103001 int, addrlen)
103002 {
103003 struct socket *sock;
103004 + struct sockaddr *sck;
103005 struct sockaddr_storage address;
103006 int err, fput_needed;
103007
103008 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
103009 if (err < 0)
103010 goto out_put;
103011
103012 + sck = (struct sockaddr *)&address;
103013 +
103014 + if (gr_handle_sock_client(sck)) {
103015 + err = -EACCES;
103016 + goto out_put;
103017 + }
103018 +
103019 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
103020 + if (err)
103021 + goto out_put;
103022 +
103023 err =
103024 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
103025 if (err)
103026 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
103027 int err, ctl_len, iov_size, total_len;
103028 int fput_needed;
103029
103030 + pax_track_stack();
103031 +
103032 err = -EFAULT;
103033 if (MSG_CMSG_COMPAT & flags) {
103034 if (get_compat_msghdr(&msg_sys, msg_compat))
103035 @@ -2022,7 +2097,7 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
103036 * kernel msghdr to use the kernel address space)
103037 */
103038
103039 - uaddr = (__force void __user *)msg_sys.msg_name;
103040 + uaddr = (void __force_user *)msg_sys.msg_name;
103041 uaddr_len = COMPAT_NAMELEN(msg);
103042 if (MSG_CMSG_COMPAT & flags) {
103043 err = verify_compat_iovec(&msg_sys, iov,
103044 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
103045 index ac94477..8afe5c3 100644
103046 --- a/net/sunrpc/sched.c
103047 +++ b/net/sunrpc/sched.c
103048 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *word)
103049 #ifdef RPC_DEBUG
103050 static void rpc_task_set_debuginfo(struct rpc_task *task)
103051 {
103052 - static atomic_t rpc_pid;
103053 + static atomic_unchecked_t rpc_pid;
103054
103055 task->tk_magic = RPC_TASK_MAGIC_ID;
103056 - task->tk_pid = atomic_inc_return(&rpc_pid);
103057 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
103058 }
103059 #else
103060 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
103061 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
103062 index 35fb68b..236a8bf 100644
103063 --- a/net/sunrpc/xprtrdma/svc_rdma.c
103064 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
103065 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
103066 static unsigned int min_max_inline = 4096;
103067 static unsigned int max_max_inline = 65536;
103068
103069 -atomic_t rdma_stat_recv;
103070 -atomic_t rdma_stat_read;
103071 -atomic_t rdma_stat_write;
103072 -atomic_t rdma_stat_sq_starve;
103073 -atomic_t rdma_stat_rq_starve;
103074 -atomic_t rdma_stat_rq_poll;
103075 -atomic_t rdma_stat_rq_prod;
103076 -atomic_t rdma_stat_sq_poll;
103077 -atomic_t rdma_stat_sq_prod;
103078 +atomic_unchecked_t rdma_stat_recv;
103079 +atomic_unchecked_t rdma_stat_read;
103080 +atomic_unchecked_t rdma_stat_write;
103081 +atomic_unchecked_t rdma_stat_sq_starve;
103082 +atomic_unchecked_t rdma_stat_rq_starve;
103083 +atomic_unchecked_t rdma_stat_rq_poll;
103084 +atomic_unchecked_t rdma_stat_rq_prod;
103085 +atomic_unchecked_t rdma_stat_sq_poll;
103086 +atomic_unchecked_t rdma_stat_sq_prod;
103087
103088 /* Temporary NFS request map and context caches */
103089 struct kmem_cache *svc_rdma_map_cachep;
103090 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *table, int write,
103091 len -= *ppos;
103092 if (len > *lenp)
103093 len = *lenp;
103094 - if (len && copy_to_user(buffer, str_buf, len))
103095 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
103096 return -EFAULT;
103097 *lenp = len;
103098 *ppos += len;
103099 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] = {
103100 {
103101 .procname = "rdma_stat_read",
103102 .data = &rdma_stat_read,
103103 - .maxlen = sizeof(atomic_t),
103104 + .maxlen = sizeof(atomic_unchecked_t),
103105 .mode = 0644,
103106 .proc_handler = &read_reset_stat,
103107 },
103108 {
103109 .procname = "rdma_stat_recv",
103110 .data = &rdma_stat_recv,
103111 - .maxlen = sizeof(atomic_t),
103112 + .maxlen = sizeof(atomic_unchecked_t),
103113 .mode = 0644,
103114 .proc_handler = &read_reset_stat,
103115 },
103116 {
103117 .procname = "rdma_stat_write",
103118 .data = &rdma_stat_write,
103119 - .maxlen = sizeof(atomic_t),
103120 + .maxlen = sizeof(atomic_unchecked_t),
103121 .mode = 0644,
103122 .proc_handler = &read_reset_stat,
103123 },
103124 {
103125 .procname = "rdma_stat_sq_starve",
103126 .data = &rdma_stat_sq_starve,
103127 - .maxlen = sizeof(atomic_t),
103128 + .maxlen = sizeof(atomic_unchecked_t),
103129 .mode = 0644,
103130 .proc_handler = &read_reset_stat,
103131 },
103132 {
103133 .procname = "rdma_stat_rq_starve",
103134 .data = &rdma_stat_rq_starve,
103135 - .maxlen = sizeof(atomic_t),
103136 + .maxlen = sizeof(atomic_unchecked_t),
103137 .mode = 0644,
103138 .proc_handler = &read_reset_stat,
103139 },
103140 {
103141 .procname = "rdma_stat_rq_poll",
103142 .data = &rdma_stat_rq_poll,
103143 - .maxlen = sizeof(atomic_t),
103144 + .maxlen = sizeof(atomic_unchecked_t),
103145 .mode = 0644,
103146 .proc_handler = &read_reset_stat,
103147 },
103148 {
103149 .procname = "rdma_stat_rq_prod",
103150 .data = &rdma_stat_rq_prod,
103151 - .maxlen = sizeof(atomic_t),
103152 + .maxlen = sizeof(atomic_unchecked_t),
103153 .mode = 0644,
103154 .proc_handler = &read_reset_stat,
103155 },
103156 {
103157 .procname = "rdma_stat_sq_poll",
103158 .data = &rdma_stat_sq_poll,
103159 - .maxlen = sizeof(atomic_t),
103160 + .maxlen = sizeof(atomic_unchecked_t),
103161 .mode = 0644,
103162 .proc_handler = &read_reset_stat,
103163 },
103164 {
103165 .procname = "rdma_stat_sq_prod",
103166 .data = &rdma_stat_sq_prod,
103167 - .maxlen = sizeof(atomic_t),
103168 + .maxlen = sizeof(atomic_unchecked_t),
103169 .mode = 0644,
103170 .proc_handler = &read_reset_stat,
103171 },
103172 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
103173 index 9e88438..8ed5cf0 100644
103174 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
103175 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
103176 @@ -495,7 +495,7 @@ next_sge:
103177 svc_rdma_put_context(ctxt, 0);
103178 goto out;
103179 }
103180 - atomic_inc(&rdma_stat_read);
103181 + atomic_inc_unchecked(&rdma_stat_read);
103182
103183 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
103184 chl_map->ch[ch_no].count -= read_wr.num_sge;
103185 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
103186 dto_q);
103187 list_del_init(&ctxt->dto_q);
103188 } else {
103189 - atomic_inc(&rdma_stat_rq_starve);
103190 + atomic_inc_unchecked(&rdma_stat_rq_starve);
103191 clear_bit(XPT_DATA, &xprt->xpt_flags);
103192 ctxt = NULL;
103193 }
103194 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
103195 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
103196 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
103197 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
103198 - atomic_inc(&rdma_stat_recv);
103199 + atomic_inc_unchecked(&rdma_stat_recv);
103200
103201 /* Build up the XDR from the receive buffers. */
103202 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
103203 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
103204 index f11be72..7aad4e8 100644
103205 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
103206 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
103207 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
103208 write_wr.wr.rdma.remote_addr = to;
103209
103210 /* Post It */
103211 - atomic_inc(&rdma_stat_write);
103212 + atomic_inc_unchecked(&rdma_stat_write);
103213 if (svc_rdma_send(xprt, &write_wr))
103214 goto err;
103215 return 0;
103216 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
103217 index 3fa5751..030ba89 100644
103218 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
103219 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
103220 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
103221 return;
103222
103223 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
103224 - atomic_inc(&rdma_stat_rq_poll);
103225 + atomic_inc_unchecked(&rdma_stat_rq_poll);
103226
103227 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
103228 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
103229 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
103230 }
103231
103232 if (ctxt)
103233 - atomic_inc(&rdma_stat_rq_prod);
103234 + atomic_inc_unchecked(&rdma_stat_rq_prod);
103235
103236 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
103237 /*
103238 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
103239 return;
103240
103241 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
103242 - atomic_inc(&rdma_stat_sq_poll);
103243 + atomic_inc_unchecked(&rdma_stat_sq_poll);
103244 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
103245 if (wc.status != IB_WC_SUCCESS)
103246 /* Close the transport */
103247 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
103248 }
103249
103250 if (ctxt)
103251 - atomic_inc(&rdma_stat_sq_prod);
103252 + atomic_inc_unchecked(&rdma_stat_sq_prod);
103253 }
103254
103255 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
103256 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
103257 spin_lock_bh(&xprt->sc_lock);
103258 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
103259 spin_unlock_bh(&xprt->sc_lock);
103260 - atomic_inc(&rdma_stat_sq_starve);
103261 + atomic_inc_unchecked(&rdma_stat_sq_starve);
103262
103263 /* See if we can opportunistically reap SQ WR to make room */
103264 sq_cq_reap(xprt);
103265 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
103266 index 0b15d72..7934fbb 100644
103267 --- a/net/sysctl_net.c
103268 +++ b/net/sysctl_net.c
103269 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
103270 struct ctl_table *table)
103271 {
103272 /* Allow network administrator to have same access as root. */
103273 - if (capable(CAP_NET_ADMIN)) {
103274 + if (capable_nolog(CAP_NET_ADMIN)) {
103275 int mode = (table->mode >> 6) & 7;
103276 return (mode << 6) | (mode << 3) | mode;
103277 }
103278 diff --git a/net/tipc/link.c b/net/tipc/link.c
103279 index dd4c18b..f40d38d 100644
103280 --- a/net/tipc/link.c
103281 +++ b/net/tipc/link.c
103282 @@ -1418,7 +1418,7 @@ again:
103283
103284 if (!sect_rest) {
103285 sect_rest = msg_sect[++curr_sect].iov_len;
103286 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
103287 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
103288 }
103289
103290 if (sect_rest < fragm_rest)
103291 @@ -1437,7 +1437,7 @@ error:
103292 }
103293 } else
103294 skb_copy_to_linear_data_offset(buf, fragm_crs,
103295 - sect_crs, sz);
103296 + (const void __force_kernel *)sect_crs, sz);
103297 sect_crs += sz;
103298 sect_rest -= sz;
103299 fragm_crs += sz;
103300 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
103301 index 0747d8a..e8bf3f3 100644
103302 --- a/net/tipc/subscr.c
103303 +++ b/net/tipc/subscr.c
103304 @@ -104,7 +104,7 @@ static void subscr_send_event(struct subscription *sub,
103305 {
103306 struct iovec msg_sect;
103307
103308 - msg_sect.iov_base = (void *)&sub->evt;
103309 + msg_sect.iov_base = (void __force_user *)&sub->evt;
103310 msg_sect.iov_len = sizeof(struct tipc_event);
103311
103312 sub->evt.event = htohl(event, sub->swap);
103313 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
103314 index db8d51a..608692d 100644
103315 --- a/net/unix/af_unix.c
103316 +++ b/net/unix/af_unix.c
103317 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(struct net *net,
103318 err = -ECONNREFUSED;
103319 if (!S_ISSOCK(inode->i_mode))
103320 goto put_fail;
103321 +
103322 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
103323 + err = -EACCES;
103324 + goto put_fail;
103325 + }
103326 +
103327 u = unix_find_socket_byinode(net, inode);
103328 if (!u)
103329 goto put_fail;
103330 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(struct net *net,
103331 if (u) {
103332 struct dentry *dentry;
103333 dentry = unix_sk(u)->dentry;
103334 +
103335 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
103336 + err = -EPERM;
103337 + sock_put(u);
103338 + goto fail;
103339 + }
103340 +
103341 if (dentry)
103342 touch_atime(unix_sk(u)->mnt, dentry);
103343 } else
103344 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
103345 err = security_path_mknod(&nd.path, dentry, mode, 0);
103346 if (err)
103347 goto out_mknod_drop_write;
103348 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
103349 + err = -EACCES;
103350 + goto out_mknod_drop_write;
103351 + }
103352 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
103353 out_mknod_drop_write:
103354 mnt_drop_write(nd.path.mnt);
103355 if (err)
103356 goto out_mknod_dput;
103357 +
103358 + gr_handle_create(dentry, nd.path.mnt);
103359 +
103360 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
103361 dput(nd.path.dentry);
103362 nd.path.dentry = dentry;
103363 @@ -2211,7 +2231,11 @@ static int unix_seq_show(struct seq_file *seq, void *v)
103364 unix_state_lock(s);
103365
103366 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
103367 +#ifdef CONFIG_GRKERNSEC_HIDESYM
103368 + NULL,
103369 +#else
103370 s,
103371 +#endif
103372 atomic_read(&s->sk_refcnt),
103373 0,
103374 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
103375 diff --git a/net/wireless/core.h b/net/wireless/core.h
103376 index 376798f..109a61f 100644
103377 --- a/net/wireless/core.h
103378 +++ b/net/wireless/core.h
103379 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
103380 struct mutex mtx;
103381
103382 /* rfkill support */
103383 - struct rfkill_ops rfkill_ops;
103384 + rfkill_ops_no_const rfkill_ops;
103385 struct rfkill *rfkill;
103386 struct work_struct rfkill_sync;
103387
103388 diff --git a/net/wireless/wext.c b/net/wireless/wext.c
103389 index a2e4c60..0979cbe 100644
103390 --- a/net/wireless/wext.c
103391 +++ b/net/wireless/wext.c
103392 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
103393 */
103394
103395 /* Support for very large requests */
103396 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
103397 - (user_length > descr->max_tokens)) {
103398 + if (user_length > descr->max_tokens) {
103399 /* Allow userspace to GET more than max so
103400 * we can support any size GET requests.
103401 * There is still a limit : -ENOMEM.
103402 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
103403 }
103404 }
103405
103406 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
103407 - /*
103408 - * If this is a GET, but not NOMAX, it means that the extra
103409 - * data is not bounded by userspace, but by max_tokens. Thus
103410 - * set the length to max_tokens. This matches the extra data
103411 - * allocation.
103412 - * The driver should fill it with the number of tokens it
103413 - * provided, and it may check iwp->length rather than having
103414 - * knowledge of max_tokens. If the driver doesn't change the
103415 - * iwp->length, this ioctl just copies back max_token tokens
103416 - * filled with zeroes. Hopefully the driver isn't claiming
103417 - * them to be valid data.
103418 - */
103419 - iwp->length = descr->max_tokens;
103420 - }
103421 -
103422 err = handler(dev, info, (union iwreq_data *) iwp, extra);
103423
103424 iwp->length += essid_compat;
103425 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
103426 index cb81ca3..e15d49a 100644
103427 --- a/net/xfrm/xfrm_policy.c
103428 +++ b/net/xfrm/xfrm_policy.c
103429 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
103430 hlist_add_head(&policy->bydst, chain);
103431 xfrm_pol_hold(policy);
103432 net->xfrm.policy_count[dir]++;
103433 - atomic_inc(&flow_cache_genid);
103434 + atomic_inc_unchecked(&flow_cache_genid);
103435 if (delpol)
103436 __xfrm_policy_unlink(delpol, dir);
103437 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
103438 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
103439 write_unlock_bh(&xfrm_policy_lock);
103440
103441 if (ret && delete) {
103442 - atomic_inc(&flow_cache_genid);
103443 + atomic_inc_unchecked(&flow_cache_genid);
103444 xfrm_policy_kill(ret);
103445 }
103446 return ret;
103447 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
103448 write_unlock_bh(&xfrm_policy_lock);
103449
103450 if (ret && delete) {
103451 - atomic_inc(&flow_cache_genid);
103452 + atomic_inc_unchecked(&flow_cache_genid);
103453 xfrm_policy_kill(ret);
103454 }
103455 return ret;
103456 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
103457 }
103458
103459 }
103460 - atomic_inc(&flow_cache_genid);
103461 + atomic_inc_unchecked(&flow_cache_genid);
103462 out:
103463 write_unlock_bh(&xfrm_policy_lock);
103464 return err;
103465 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
103466 write_unlock_bh(&xfrm_policy_lock);
103467 if (pol) {
103468 if (dir < XFRM_POLICY_MAX)
103469 - atomic_inc(&flow_cache_genid);
103470 + atomic_inc_unchecked(&flow_cache_genid);
103471 xfrm_policy_kill(pol);
103472 return 0;
103473 }
103474 @@ -1477,7 +1477,7 @@ free_dst:
103475 goto out;
103476 }
103477
103478 -static int inline
103479 +static inline int
103480 xfrm_dst_alloc_copy(void **target, void *src, int size)
103481 {
103482 if (!*target) {
103483 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void *src, int size)
103484 return 0;
103485 }
103486
103487 -static int inline
103488 +static inline int
103489 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
103490 {
103491 #ifdef CONFIG_XFRM_SUB_POLICY
103492 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
103493 #endif
103494 }
103495
103496 -static int inline
103497 +static inline int
103498 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
103499 {
103500 #ifdef CONFIG_XFRM_SUB_POLICY
103501 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
103502 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
103503
103504 restart:
103505 - genid = atomic_read(&flow_cache_genid);
103506 + genid = atomic_read_unchecked(&flow_cache_genid);
103507 policy = NULL;
103508 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
103509 pols[pi] = NULL;
103510 @@ -1680,7 +1680,7 @@ restart:
103511 goto error;
103512 }
103513 if (nx == -EAGAIN ||
103514 - genid != atomic_read(&flow_cache_genid)) {
103515 + genid != atomic_read_unchecked(&flow_cache_genid)) {
103516 xfrm_pols_put(pols, npols);
103517 goto restart;
103518 }
103519 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
103520 index b95a2d6..85c4d78 100644
103521 --- a/net/xfrm/xfrm_user.c
103522 +++ b/net/xfrm/xfrm_user.c
103523 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
103524 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
103525 int i;
103526
103527 + pax_track_stack();
103528 +
103529 if (xp->xfrm_nr == 0)
103530 return 0;
103531
103532 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
103533 int err;
103534 int n = 0;
103535
103536 + pax_track_stack();
103537 +
103538 if (attrs[XFRMA_MIGRATE] == NULL)
103539 return -EINVAL;
103540
103541 diff --git a/samples/kobject/kset-example.c b/samples/kobject/kset-example.c
103542 index 45b7d56..19e828c 100644
103543 --- a/samples/kobject/kset-example.c
103544 +++ b/samples/kobject/kset-example.c
103545 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kobject *kobj,
103546 }
103547
103548 /* Our custom sysfs_ops that we will associate with our ktype later on */
103549 -static struct sysfs_ops foo_sysfs_ops = {
103550 +static const struct sysfs_ops foo_sysfs_ops = {
103551 .show = foo_attr_show,
103552 .store = foo_attr_store,
103553 };
103554 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
103555 index 341b589..405aed3 100644
103556 --- a/scripts/Makefile.build
103557 +++ b/scripts/Makefile.build
103558 @@ -59,7 +59,7 @@ endif
103559 endif
103560
103561 # Do not include host rules unless needed
103562 -ifneq ($(hostprogs-y)$(hostprogs-m),)
103563 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
103564 include scripts/Makefile.host
103565 endif
103566
103567 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
103568 index 6f89fbb..53adc9c 100644
103569 --- a/scripts/Makefile.clean
103570 +++ b/scripts/Makefile.clean
103571 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
103572 __clean-files := $(extra-y) $(always) \
103573 $(targets) $(clean-files) \
103574 $(host-progs) \
103575 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
103576 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
103577 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
103578
103579 # as clean-files is given relative to the current directory, this adds
103580 # a $(obj) prefix, except for absolute paths
103581 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
103582 index 1ac414f..a1c1451 100644
103583 --- a/scripts/Makefile.host
103584 +++ b/scripts/Makefile.host
103585 @@ -31,6 +31,7 @@
103586 # Note: Shared libraries consisting of C++ files are not supported
103587
103588 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
103589 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
103590
103591 # C code
103592 # Executables compiled from a single .c file
103593 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
103594 # Shared libaries (only .c supported)
103595 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
103596 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
103597 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
103598 # Remove .so files from "xxx-objs"
103599 host-cobjs := $(filter-out %.so,$(host-cobjs))
103600
103601 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
103602 index 6bf21f8..c0546b3 100644
103603 --- a/scripts/basic/fixdep.c
103604 +++ b/scripts/basic/fixdep.c
103605 @@ -162,7 +162,7 @@ static void grow_config(int len)
103606 /*
103607 * Lookup a value in the configuration string.
103608 */
103609 -static int is_defined_config(const char * name, int len)
103610 +static int is_defined_config(const char * name, unsigned int len)
103611 {
103612 const char * pconfig;
103613 const char * plast = str_config + len_config - len;
103614 @@ -199,7 +199,7 @@ static void clear_config(void)
103615 /*
103616 * Record the use of a CONFIG_* word.
103617 */
103618 -static void use_config(char *m, int slen)
103619 +static void use_config(char *m, unsigned int slen)
103620 {
103621 char s[PATH_MAX];
103622 char *p;
103623 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen)
103624
103625 static void parse_config_file(char *map, size_t len)
103626 {
103627 - int *end = (int *) (map + len);
103628 + unsigned int *end = (unsigned int *) (map + len);
103629 /* start at +1, so that p can never be < map */
103630 - int *m = (int *) map + 1;
103631 + unsigned int *m = (unsigned int *) map + 1;
103632 char *p, *q;
103633
103634 for (; m < end; m++) {
103635 @@ -371,7 +371,7 @@ static void print_deps(void)
103636 static void traps(void)
103637 {
103638 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
103639 - int *p = (int *)test;
103640 + unsigned int *p = (unsigned int *)test;
103641
103642 if (*p != INT_CONF) {
103643 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
103644 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
103645 new file mode 100644
103646 index 0000000..8729101
103647 --- /dev/null
103648 +++ b/scripts/gcc-plugin.sh
103649 @@ -0,0 +1,2 @@
103650 +#!/bin/sh
103651 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
103652 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
103653 index 62a9025..65b82ad 100644
103654 --- a/scripts/mod/file2alias.c
103655 +++ b/scripts/mod/file2alias.c
103656 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
103657 unsigned long size, unsigned long id_size,
103658 void *symval)
103659 {
103660 - int i;
103661 + unsigned int i;
103662
103663 if (size % id_size || size < id_size) {
103664 if (cross_build != 0)
103665 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
103666 /* USB is special because the bcdDevice can be matched against a numeric range */
103667 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
103668 static void do_usb_entry(struct usb_device_id *id,
103669 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
103670 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
103671 unsigned char range_lo, unsigned char range_hi,
103672 struct module *mod)
103673 {
103674 @@ -151,7 +151,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
103675 {
103676 unsigned int devlo, devhi;
103677 unsigned char chi, clo;
103678 - int ndigits;
103679 + unsigned int ndigits;
103680
103681 id->match_flags = TO_NATIVE(id->match_flags);
103682 id->idVendor = TO_NATIVE(id->idVendor);
103683 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
103684 for (i = 0; i < count; i++) {
103685 const char *id = (char *)devs[i].id;
103686 char acpi_id[sizeof(devs[0].id)];
103687 - int j;
103688 + unsigned int j;
103689
103690 buf_printf(&mod->dev_table_buf,
103691 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
103692 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
103693
103694 for (j = 0; j < PNP_MAX_DEVICES; j++) {
103695 const char *id = (char *)card->devs[j].id;
103696 - int i2, j2;
103697 + unsigned int i2, j2;
103698 int dup = 0;
103699
103700 if (!id[0])
103701 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
103702 /* add an individual alias for every device entry */
103703 if (!dup) {
103704 char acpi_id[sizeof(card->devs[0].id)];
103705 - int k;
103706 + unsigned int k;
103707
103708 buf_printf(&mod->dev_table_buf,
103709 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
103710 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, const char *s)
103711 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
103712 char *alias)
103713 {
103714 - int i, j;
103715 + unsigned int i, j;
103716
103717 sprintf(alias, "dmi*");
103718
103719 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
103720 index 03efeab..0888989 100644
103721 --- a/scripts/mod/modpost.c
103722 +++ b/scripts/mod/modpost.c
103723 @@ -835,6 +835,7 @@ enum mismatch {
103724 INIT_TO_EXIT,
103725 EXIT_TO_INIT,
103726 EXPORT_TO_INIT_EXIT,
103727 + DATA_TO_TEXT
103728 };
103729
103730 struct sectioncheck {
103731 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[] = {
103732 .fromsec = { "__ksymtab*", NULL },
103733 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
103734 .mismatch = EXPORT_TO_INIT_EXIT
103735 +},
103736 +/* Do not reference code from writable data */
103737 +{
103738 + .fromsec = { DATA_SECTIONS, NULL },
103739 + .tosec = { TEXT_SECTIONS, NULL },
103740 + .mismatch = DATA_TO_TEXT
103741 }
103742 };
103743
103744 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
103745 continue;
103746 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
103747 continue;
103748 - if (sym->st_value == addr)
103749 - return sym;
103750 /* Find a symbol nearby - addr are maybe negative */
103751 d = sym->st_value - addr;
103752 + if (d == 0)
103753 + return sym;
103754 if (d < 0)
103755 d = addr - sym->st_value;
103756 if (d < distance) {
103757 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
103758 "Fix this by removing the %sannotation of %s "
103759 "or drop the export.\n",
103760 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
103761 + case DATA_TO_TEXT:
103762 +/*
103763 + fprintf(stderr,
103764 + "The variable %s references\n"
103765 + "the %s %s%s%s\n",
103766 + fromsym, to, sec2annotation(tosec), tosym, to_p);
103767 +*/
103768 + break;
103769 case NO_MISMATCH:
103770 /* To get warnings on missing members */
103771 break;
103772 @@ -1495,7 +1510,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
103773 static void check_sec_ref(struct module *mod, const char *modname,
103774 struct elf_info *elf)
103775 {
103776 - int i;
103777 + unsigned int i;
103778 Elf_Shdr *sechdrs = elf->sechdrs;
103779
103780 /* Walk through all sections */
103781 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
103782 va_end(ap);
103783 }
103784
103785 -void buf_write(struct buffer *buf, const char *s, int len)
103786 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
103787 {
103788 if (buf->size - buf->pos < len) {
103789 buf->size += len + SZ;
103790 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
103791 if (fstat(fileno(file), &st) < 0)
103792 goto close_write;
103793
103794 - if (st.st_size != b->pos)
103795 + if (st.st_size != (off_t)b->pos)
103796 goto close_write;
103797
103798 tmp = NOFAIL(malloc(b->pos));
103799 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
103800 index 09f58e3..4b66092 100644
103801 --- a/scripts/mod/modpost.h
103802 +++ b/scripts/mod/modpost.h
103803 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
103804
103805 struct buffer {
103806 char *p;
103807 - int pos;
103808 - int size;
103809 + unsigned int pos;
103810 + unsigned int size;
103811 };
103812
103813 void __attribute__((format(printf, 2, 3)))
103814 buf_printf(struct buffer *buf, const char *fmt, ...);
103815
103816 void
103817 -buf_write(struct buffer *buf, const char *s, int len);
103818 +buf_write(struct buffer *buf, const char *s, unsigned int len);
103819
103820 struct module {
103821 struct module *next;
103822 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
103823 index ecf9c7d..d52b38e 100644
103824 --- a/scripts/mod/sumversion.c
103825 +++ b/scripts/mod/sumversion.c
103826 @@ -455,7 +455,7 @@ static void write_version(const char *filename, const char *sum,
103827 goto out;
103828 }
103829
103830 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
103831 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
103832 warn("writing sum in %s failed: %s\n",
103833 filename, strerror(errno));
103834 goto out;
103835 diff --git a/scripts/package/mkspec b/scripts/package/mkspec
103836 index 47bdd2f..d4d4e93 100755
103837 --- a/scripts/package/mkspec
103838 +++ b/scripts/package/mkspec
103839 @@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules'
103840 echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
103841 echo "%endif"
103842
103843 -echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install'
103844 +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
103845 echo "%ifarch ia64"
103846 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
103847 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
103848 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
103849 index 5c11312..72742b5 100644
103850 --- a/scripts/pnmtologo.c
103851 +++ b/scripts/pnmtologo.c
103852 @@ -237,14 +237,14 @@ static void write_header(void)
103853 fprintf(out, " * Linux logo %s\n", logoname);
103854 fputs(" */\n\n", out);
103855 fputs("#include <linux/linux_logo.h>\n\n", out);
103856 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
103857 + fprintf(out, "static unsigned char %s_data[] = {\n",
103858 logoname);
103859 }
103860
103861 static void write_footer(void)
103862 {
103863 fputs("\n};\n\n", out);
103864 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
103865 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
103866 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
103867 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
103868 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
103869 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
103870 fputs("\n};\n\n", out);
103871
103872 /* write logo clut */
103873 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
103874 + fprintf(out, "static unsigned char %s_clut[] = {\n",
103875 logoname);
103876 write_hex_cnt = 0;
103877 for (i = 0; i < logo_clutsize; i++) {
103878 diff --git a/scripts/tags.sh b/scripts/tags.sh
103879 index d52f7a0..269eb1b 100755
103880 --- a/scripts/tags.sh
103881 +++ b/scripts/tags.sh
103882 @@ -93,6 +93,11 @@ docscope()
103883 cscope -b -f cscope.out
103884 }
103885
103886 +dogtags()
103887 +{
103888 + all_sources | gtags -f -
103889 +}
103890 +
103891 exuberant()
103892 {
103893 all_sources | xargs $1 -a \
103894 @@ -164,6 +169,10 @@ case "$1" in
103895 docscope
103896 ;;
103897
103898 + "gtags")
103899 + dogtags
103900 + ;;
103901 +
103902 "tags")
103903 rm -f tags
103904 xtags ctags
103905 diff --git a/security/Kconfig b/security/Kconfig
103906 index fb363cd..a9d08e5 100644
103907 --- a/security/Kconfig
103908 +++ b/security/Kconfig
103909 @@ -4,6 +4,638 @@
103910
103911 menu "Security options"
103912
103913 +source grsecurity/Kconfig
103914 +
103915 +menu "PaX"
103916 +
103917 + config ARCH_TRACK_EXEC_LIMIT
103918 + bool
103919 +
103920 + config PAX_KERNEXEC_PLUGIN
103921 + bool
103922 +
103923 + config PAX_PER_CPU_PGD
103924 + bool
103925 +
103926 + config TASK_SIZE_MAX_SHIFT
103927 + int
103928 + depends on X86_64
103929 + default 47 if !PAX_PER_CPU_PGD
103930 + default 42 if PAX_PER_CPU_PGD
103931 +
103932 + config PAX_ENABLE_PAE
103933 + bool
103934 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
103935 +
103936 +config PAX
103937 + bool "Enable various PaX features"
103938 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
103939 + help
103940 + This allows you to enable various PaX features. PaX adds
103941 + intrusion prevention mechanisms to the kernel that reduce
103942 + the risks posed by exploitable memory corruption bugs.
103943 +
103944 +menu "PaX Control"
103945 + depends on PAX
103946 +
103947 +config PAX_SOFTMODE
103948 + bool 'Support soft mode'
103949 + help
103950 + Enabling this option will allow you to run PaX in soft mode, that
103951 + is, PaX features will not be enforced by default, only on executables
103952 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
103953 + support as they are the only way to mark executables for soft mode use.
103954 +
103955 + Soft mode can be activated by using the "pax_softmode=1" kernel command
103956 + line option on boot. Furthermore you can control various PaX features
103957 + at runtime via the entries in /proc/sys/kernel/pax.
103958 +
103959 +config PAX_EI_PAX
103960 + bool 'Use legacy ELF header marking'
103961 + help
103962 + Enabling this option will allow you to control PaX features on
103963 + a per executable basis via the 'chpax' utility available at
103964 + http://pax.grsecurity.net/. The control flags will be read from
103965 + an otherwise reserved part of the ELF header. This marking has
103966 + numerous drawbacks (no support for soft-mode, toolchain does not
103967 + know about the non-standard use of the ELF header) therefore it
103968 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
103969 + support.
103970 +
103971 + If you have applications not marked by the PT_PAX_FLAGS ELF program
103972 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
103973 + option otherwise they will not get any protection.
103974 +
103975 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
103976 + support as well, they will override the legacy EI_PAX marks.
103977 +
103978 +config PAX_PT_PAX_FLAGS
103979 + bool 'Use ELF program header marking'
103980 + help
103981 + Enabling this option will allow you to control PaX features on
103982 + a per executable basis via the 'paxctl' utility available at
103983 + http://pax.grsecurity.net/. The control flags will be read from
103984 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
103985 + has the benefits of supporting both soft mode and being fully
103986 + integrated into the toolchain (the binutils patch is available
103987 + from http://pax.grsecurity.net).
103988 +
103989 + If you have applications not marked by the PT_PAX_FLAGS ELF program
103990 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
103991 + support otherwise they will not get any protection.
103992 +
103993 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
103994 + must make sure that the marks are the same if a binary has both marks.
103995 +
103996 + Note that if you enable the legacy EI_PAX marking support as well,
103997 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
103998 +
103999 +config PAX_XATTR_PAX_FLAGS
104000 + bool 'Use filesystem extended attributes marking'
104001 + select CIFS_XATTR if CIFS
104002 + select EXT2_FS_XATTR if EXT2_FS
104003 + select EXT3_FS_XATTR if EXT3_FS
104004 + select EXT4_FS_XATTR if EXT4_FS
104005 + select JFFS2_FS_XATTR if JFFS2_FS
104006 + select REISERFS_FS_XATTR if REISERFS_FS
104007 + select UBIFS_FS_XATTR if UBIFS_FS
104008 + help
104009 + Enabling this option will allow you to control PaX features on
104010 + a per executable basis via the 'setfattr' utility. The control
104011 + flags will be read from the user.pax.flags extended attribute of
104012 + the file. This marking has the benefit of supporting binary-only
104013 + applications that self-check themselves (e.g., skype) and would
104014 + not tolerate chpax/paxctl changes. The main drawback is that
104015 + extended attributes are not supported by some filesystems (e.g.,
104016 + isofs, squashfs, tmpfs, udf, vfat) so copying files through such
104017 + filesystems will lose the extended attributes and these PaX markings.
104018 +
104019 + If you have applications not marked by the PT_PAX_FLAGS ELF program
104020 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
104021 + support otherwise they will not get any protection.
104022 +
104023 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
104024 + must make sure that the marks are the same if a binary has both marks.
104025 +
104026 + Note that if you enable the legacy EI_PAX marking support as well,
104027 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
104028 +
104029 +choice
104030 + prompt 'MAC system integration'
104031 + default PAX_HAVE_ACL_FLAGS
104032 + help
104033 + Mandatory Access Control systems have the option of controlling
104034 + PaX flags on a per executable basis, choose the method supported
104035 + by your particular system.
104036 +
104037 + - "none": if your MAC system does not interact with PaX,
104038 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
104039 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
104040 +
104041 + NOTE: this option is for developers/integrators only.
104042 +
104043 + config PAX_NO_ACL_FLAGS
104044 + bool 'none'
104045 +
104046 + config PAX_HAVE_ACL_FLAGS
104047 + bool 'direct'
104048 +
104049 + config PAX_HOOK_ACL_FLAGS
104050 + bool 'hook'
104051 +endchoice
104052 +
104053 +endmenu
104054 +
104055 +menu "Non-executable pages"
104056 + depends on PAX
104057 +
104058 +config PAX_NOEXEC
104059 + bool "Enforce non-executable pages"
104060 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
104061 + help
104062 + By design some architectures do not allow for protecting memory
104063 + pages against execution or even if they do, Linux does not make
104064 + use of this feature. In practice this means that if a page is
104065 + readable (such as the stack or heap) it is also executable.
104066 +
104067 + There is a well known exploit technique that makes use of this
104068 + fact and a common programming mistake where an attacker can
104069 + introduce code of his choice somewhere in the attacked program's
104070 + memory (typically the stack or the heap) and then execute it.
104071 +
104072 + If the attacked program was running with different (typically
104073 + higher) privileges than that of the attacker, then he can elevate
104074 + his own privilege level (e.g. get a root shell, write to files for
104075 + which he does not have write access to, etc).
104076 +
104077 + Enabling this option will let you choose from various features
104078 + that prevent the injection and execution of 'foreign' code in
104079 + a program.
104080 +
104081 + This will also break programs that rely on the old behaviour and
104082 + expect that dynamically allocated memory via the malloc() family
104083 + of functions is executable (which it is not). Notable examples
104084 + are the XFree86 4.x server, the java runtime and wine.
104085 +
104086 +config PAX_PAGEEXEC
104087 + bool "Paging based non-executable pages"
104088 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
104089 + select S390_SWITCH_AMODE if S390
104090 + select S390_EXEC_PROTECT if S390
104091 + select ARCH_TRACK_EXEC_LIMIT if X86_32
104092 + help
104093 + This implementation is based on the paging feature of the CPU.
104094 + On i386 without hardware non-executable bit support there is a
104095 + variable but usually low performance impact, however on Intel's
104096 + P4 core based CPUs it is very high so you should not enable this
104097 + for kernels meant to be used on such CPUs.
104098 +
104099 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
104100 + with hardware non-executable bit support there is no performance
104101 + impact, on ppc the impact is negligible.
104102 +
104103 + Note that several architectures require various emulations due to
104104 + badly designed userland ABIs, this will cause a performance impact
104105 + but will disappear as soon as userland is fixed. For example, ppc
104106 + userland MUST have been built with secure-plt by a recent toolchain.
104107 +
104108 +config PAX_SEGMEXEC
104109 + bool "Segmentation based non-executable pages"
104110 + depends on PAX_NOEXEC && X86_32
104111 + help
104112 + This implementation is based on the segmentation feature of the
104113 + CPU and has a very small performance impact, however applications
104114 + will be limited to a 1.5 GB address space instead of the normal
104115 + 3 GB.
104116 +
104117 +config PAX_EMUTRAMP
104118 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
104119 + default y if PARISC
104120 + help
104121 + There are some programs and libraries that for one reason or
104122 + another attempt to execute special small code snippets from
104123 + non-executable memory pages. Most notable examples are the
104124 + signal handler return code generated by the kernel itself and
104125 + the GCC trampolines.
104126 +
104127 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
104128 + such programs will no longer work under your kernel.
104129 +
104130 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
104131 + utilities to enable trampoline emulation for the affected programs
104132 + yet still have the protection provided by the non-executable pages.
104133 +
104134 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
104135 + your system will not even boot.
104136 +
104137 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
104138 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
104139 + for the affected files.
104140 +
104141 + NOTE: enabling this feature *may* open up a loophole in the
104142 + protection provided by non-executable pages that an attacker
104143 + could abuse. Therefore the best solution is to not have any
104144 + files on your system that would require this option. This can
104145 + be achieved by not using libc5 (which relies on the kernel
104146 + signal handler return code) and not using or rewriting programs
104147 + that make use of the nested function implementation of GCC.
104148 + Skilled users can just fix GCC itself so that it implements
104149 + nested function calls in a way that does not interfere with PaX.
104150 +
104151 +config PAX_EMUSIGRT
104152 + bool "Automatically emulate sigreturn trampolines"
104153 + depends on PAX_EMUTRAMP && PARISC
104154 + default y
104155 + help
104156 + Enabling this option will have the kernel automatically detect
104157 + and emulate signal return trampolines executing on the stack
104158 + that would otherwise lead to task termination.
104159 +
104160 + This solution is intended as a temporary one for users with
104161 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
104162 + Modula-3 runtime, etc) or executables linked to such, basically
104163 + everything that does not specify its own SA_RESTORER function in
104164 + normal executable memory like glibc 2.1+ does.
104165 +
104166 + On parisc you MUST enable this option, otherwise your system will
104167 + not even boot.
104168 +
104169 + NOTE: this feature cannot be disabled on a per executable basis
104170 + and since it *does* open up a loophole in the protection provided
104171 + by non-executable pages, the best solution is to not have any
104172 + files on your system that would require this option.
104173 +
104174 +config PAX_MPROTECT
104175 + bool "Restrict mprotect()"
104176 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
104177 + help
104178 + Enabling this option will prevent programs from
104179 + - changing the executable status of memory pages that were
104180 + not originally created as executable,
104181 + - making read-only executable pages writable again,
104182 + - creating executable pages from anonymous memory,
104183 + - making read-only-after-relocations (RELRO) data pages writable again.
104184 +
104185 + You should say Y here to complete the protection provided by
104186 + the enforcement of non-executable pages.
104187 +
104188 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
104189 + this feature on a per file basis.
104190 +
104191 +config PAX_MPROTECT_COMPAT
104192 + bool "Use legacy/compat protection demoting (read help)"
104193 + depends on PAX_MPROTECT
104194 + default n
104195 + help
104196 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
104197 + by sending the proper error code to the application. For some broken
104198 + userland, this can cause problems with Python or other applications. The
104199 + current implementation however allows for applications like clamav to
104200 + detect if JIT compilation/execution is allowed and to fall back gracefully
104201 + to an interpreter-based mode if it does not. While we encourage everyone
104202 + to use the current implementation as-is and push upstream to fix broken
104203 + userland (note that the RWX logging option can assist with this), in some
104204 + environments this may not be possible. Having to disable MPROTECT
104205 + completely on certain binaries reduces the security benefit of PaX,
104206 + so this option is provided for those environments to revert to the old
104207 + behavior.
104208 +
104209 +config PAX_ELFRELOCS
104210 + bool "Allow ELF text relocations (read help)"
104211 + depends on PAX_MPROTECT
104212 + default n
104213 + help
104214 + Non-executable pages and mprotect() restrictions are effective
104215 + in preventing the introduction of new executable code into an
104216 + attacked task's address space. There remain only two venues
104217 + for this kind of attack: if the attacker can execute already
104218 + existing code in the attacked task then he can either have it
104219 + create and mmap() a file containing his code or have it mmap()
104220 + an already existing ELF library that does not have position
104221 + independent code in it and use mprotect() on it to make it
104222 + writable and copy his code there. While protecting against
104223 + the former approach is beyond PaX, the latter can be prevented
104224 + by having only PIC ELF libraries on one's system (which do not
104225 + need to relocate their code). If you are sure this is your case,
104226 + as is the case with all modern Linux distributions, then leave
104227 + this option disabled. You should say 'n' here.
104228 +
104229 +config PAX_ETEXECRELOCS
104230 + bool "Allow ELF ET_EXEC text relocations"
104231 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
104232 + select PAX_ELFRELOCS
104233 + default y
104234 + help
104235 + On some architectures there are incorrectly created applications
104236 + that require text relocations and would not work without enabling
104237 + this option. If you are an alpha, ia64 or parisc user, you should
104238 + enable this option and disable it once you have made sure that
104239 + none of your applications need it.
104240 +
104241 +config PAX_EMUPLT
104242 + bool "Automatically emulate ELF PLT"
104243 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
104244 + default y
104245 + help
104246 + Enabling this option will have the kernel automatically detect
104247 + and emulate the Procedure Linkage Table entries in ELF files.
104248 + On some architectures such entries are in writable memory, and
104249 + become non-executable leading to task termination. Therefore
104250 + it is mandatory that you enable this option on alpha, parisc,
104251 + sparc and sparc64, otherwise your system would not even boot.
104252 +
104253 + NOTE: this feature *does* open up a loophole in the protection
104254 + provided by the non-executable pages, therefore the proper
104255 + solution is to modify the toolchain to produce a PLT that does
104256 + not need to be writable.
104257 +
104258 +config PAX_DLRESOLVE
104259 + bool 'Emulate old glibc resolver stub'
104260 + depends on PAX_EMUPLT && SPARC
104261 + default n
104262 + help
104263 + This option is needed if userland has an old glibc (before 2.4)
104264 + that puts a 'save' instruction into the runtime generated resolver
104265 + stub that needs special emulation.
104266 +
104267 +config PAX_KERNEXEC
104268 + bool "Enforce non-executable kernel pages"
104269 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
104270 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
104271 + select PAX_KERNEXEC_PLUGIN if X86_64
104272 + help
104273 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
104274 + that is, enabling this option will make it harder to inject
104275 + and execute 'foreign' code in kernel memory itself.
104276 +
104277 + Note that on x86_64 kernels there is a known regression when
104278 + this feature and KVM/VMX are both enabled in the host kernel.
104279 +
104280 +choice
104281 + prompt "Return Address Instrumentation Method"
104282 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
104283 + depends on PAX_KERNEXEC_PLUGIN
104284 + help
104285 + Select the method used to instrument function pointer dereferences.
104286 + Note that binary modules cannot be instrumented by this approach.
104287 +
104288 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
104289 + bool "bts"
104290 + help
104291 + This method is compatible with binary only modules but has
104292 + a higher runtime overhead.
104293 +
104294 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
104295 + bool "or"
104296 + depends on !PARAVIRT
104297 + help
104298 + This method is incompatible with binary only modules but has
104299 + a lower runtime overhead.
104300 +endchoice
104301 +
104302 +config PAX_KERNEXEC_PLUGIN_METHOD
104303 + string
104304 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
104305 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
104306 + default ""
104307 +
104308 +config PAX_KERNEXEC_MODULE_TEXT
104309 + int "Minimum amount of memory reserved for module code"
104310 + default "4"
104311 + depends on PAX_KERNEXEC && X86_32 && MODULES
104312 + help
104313 + Due to implementation details the kernel must reserve a fixed
104314 + amount of memory for module code at compile time that cannot be
104315 + changed at runtime. Here you can specify the minimum amount
104316 + in MB that will be reserved. Due to the same implementation
104317 + details this size will always be rounded up to the next 2/4 MB
104318 + boundary (depends on PAE) so the actually available memory for
104319 + module code will usually be more than this minimum.
104320 +
104321 + The default 4 MB should be enough for most users but if you have
104322 + an excessive number of modules (e.g., most distribution configs
104323 + compile many drivers as modules) or use huge modules such as
104324 + nvidia's kernel driver, you will need to adjust this amount.
104325 + A good rule of thumb is to look at your currently loaded kernel
104326 + modules and add up their sizes.
104327 +
104328 +endmenu
104329 +
104330 +menu "Address Space Layout Randomization"
104331 + depends on PAX
104332 +
104333 +config PAX_ASLR
104334 + bool "Address Space Layout Randomization"
104335 + help
104336 + Many if not most exploit techniques rely on the knowledge of
104337 + certain addresses in the attacked program. The following options
104338 + will allow the kernel to apply a certain amount of randomization
104339 + to specific parts of the program thereby forcing an attacker to
104340 + guess them in most cases. Any failed guess will most likely crash
104341 + the attacked program which allows the kernel to detect such attempts
104342 + and react on them. PaX itself provides no reaction mechanisms,
104343 + instead it is strongly encouraged that you make use of Nergal's
104344 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
104345 + (http://www.grsecurity.net/) built-in crash detection features or
104346 + develop one yourself.
104347 +
104348 + By saying Y here you can choose to randomize the following areas:
104349 + - top of the task's kernel stack
104350 + - top of the task's userland stack
104351 + - base address for mmap() requests that do not specify one
104352 + (this includes all libraries)
104353 + - base address of the main executable
104354 +
104355 + It is strongly recommended to say Y here as address space layout
104356 + randomization has negligible impact on performance yet it provides
104357 + a very effective protection.
104358 +
104359 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
104360 + this feature on a per file basis.
104361 +
104362 +config PAX_RANDKSTACK
104363 + bool "Randomize kernel stack base"
104364 + depends on X86_TSC && X86
104365 + help
104366 + By saying Y here the kernel will randomize every task's kernel
104367 + stack on every system call. This will not only force an attacker
104368 + to guess it but also prevent him from making use of possible
104369 + leaked information about it.
104370 +
104371 + Since the kernel stack is a rather scarce resource, randomization
104372 + may cause unexpected stack overflows, therefore you should very
104373 + carefully test your system. Note that once enabled in the kernel
104374 + configuration, this feature cannot be disabled on a per file basis.
104375 +
104376 +config PAX_RANDUSTACK
104377 + bool "Randomize user stack base"
104378 + depends on PAX_ASLR
104379 + help
104380 + By saying Y here the kernel will randomize every task's userland
104381 + stack. The randomization is done in two steps where the second
104382 + one may apply a big amount of shift to the top of the stack and
104383 + cause problems for programs that want to use lots of memory (more
104384 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
104385 + For this reason the second step can be controlled by 'chpax' or
104386 + 'paxctl' on a per file basis.
104387 +
104388 +config PAX_RANDMMAP
104389 + bool "Randomize mmap() base"
104390 + depends on PAX_ASLR
104391 + help
104392 + By saying Y here the kernel will use a randomized base address for
104393 + mmap() requests that do not specify one themselves. As a result
104394 + all dynamically loaded libraries will appear at random addresses
104395 + and therefore be harder to exploit by a technique where an attacker
104396 + attempts to execute library code for his purposes (e.g. spawn a
104397 + shell from an exploited program that is running at an elevated
104398 + privilege level).
104399 +
104400 + Furthermore, if a program is relinked as a dynamic ELF file, its
104401 + base address will be randomized as well, completing the full
104402 + randomization of the address space layout. Attacking such programs
104403 + becomes a guess game. You can find an example of doing this at
104404 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
104405 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
104406 +
104407 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
104408 + feature on a per file basis.
104409 +
104410 +endmenu
104411 +
104412 +menu "Miscellaneous hardening features"
104413 +
104414 +config PAX_MEMORY_SANITIZE
104415 + bool "Sanitize all freed memory"
104416 + depends on !HIBERNATION
104417 + help
104418 + By saying Y here the kernel will erase memory pages as soon as they
104419 + are freed. This in turn reduces the lifetime of data stored in the
104420 + pages, making it less likely that sensitive information such as
104421 + passwords, cryptographic secrets, etc stay in memory for too long.
104422 +
104423 + This is especially useful for programs whose runtime is short, long
104424 + lived processes and the kernel itself benefit from this as long as
104425 + they operate on whole memory pages and ensure timely freeing of pages
104426 + that may hold sensitive information.
104427 +
104428 + The tradeoff is performance impact, on a single CPU system kernel
104429 + compilation sees a 3% slowdown, other systems and workloads may vary
104430 + and you are advised to test this feature on your expected workload
104431 + before deploying it.
104432 +
104433 + Note that this feature does not protect data stored in live pages,
104434 + e.g., process memory swapped to disk may stay there for a long time.
104435 +
104436 +config PAX_MEMORY_STACKLEAK
104437 + bool "Sanitize kernel stack"
104438 + depends on X86
104439 + help
104440 + By saying Y here the kernel will erase the kernel stack before it
104441 + returns from a system call. This in turn reduces the information
104442 + that a kernel stack leak bug can reveal.
104443 +
104444 + Note that such a bug can still leak information that was put on
104445 + the stack by the current system call (the one eventually triggering
104446 + the bug) but traces of earlier system calls on the kernel stack
104447 + cannot leak anymore.
104448 +
104449 + The tradeoff is performance impact, on a single CPU system kernel
104450 + compilation sees a 1% slowdown, other systems and workloads may vary
104451 + and you are advised to test this feature on your expected workload
104452 + before deploying it.
104453 +
104454 + Note: full support for this feature requires gcc with plugin support
104455 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
104456 + versions means that functions with large enough stack frames may
104457 + leave uninitialized memory behind that may be exposed to a later
104458 + syscall leaking the stack.
104459 +
104460 +config PAX_MEMORY_UDEREF
104461 + bool "Prevent invalid userland pointer dereference"
104462 + depends on X86 && !UML_X86 && !XEN
104463 + select PAX_PER_CPU_PGD if X86_64
104464 + help
104465 + By saying Y here the kernel will be prevented from dereferencing
104466 + userland pointers in contexts where the kernel expects only kernel
104467 + pointers. This is both a useful runtime debugging feature and a
104468 + security measure that prevents exploiting a class of kernel bugs.
104469 +
104470 + The tradeoff is that some virtualization solutions may experience
104471 + a huge slowdown and therefore you should not enable this feature
104472 + for kernels meant to run in such environments. Whether a given VM
104473 + solution is affected or not is best determined by simply trying it
104474 + out, the performance impact will be obvious right on boot as this
104475 + mechanism engages from very early on. A good rule of thumb is that
104476 + VMs running on CPUs without hardware virtualization support (i.e.,
104477 + the majority of IA-32 CPUs) will likely experience the slowdown.
104478 +
104479 +config PAX_REFCOUNT
104480 + bool "Prevent various kernel object reference counter overflows"
104481 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
104482 + help
104483 + By saying Y here the kernel will detect and prevent overflowing
104484 + various (but not all) kinds of object reference counters. Such
104485 + overflows can normally occur due to bugs only and are often, if
104486 + not always, exploitable.
104487 +
104488 + The tradeoff is that data structures protected by an overflowed
104489 + refcount will never be freed and therefore will leak memory. Note
104490 + that this leak also happens even without this protection but in
104491 + that case the overflow can eventually trigger the freeing of the
104492 + data structure while it is still being used elsewhere, resulting
104493 + in the exploitable situation that this feature prevents.
104494 +
104495 + Since this has a negligible performance impact, you should enable
104496 + this feature.
104497 +
104498 +config PAX_USERCOPY
104499 + bool "Harden heap object copies between kernel and userland"
104500 + depends on X86 || PPC || SPARC || ARM
104501 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
104502 + help
104503 + By saying Y here the kernel will enforce the size of heap objects
104504 + when they are copied in either direction between the kernel and
104505 + userland, even if only a part of the heap object is copied.
104506 +
104507 + Specifically, this checking prevents information leaking from the
104508 + kernel heap during kernel to userland copies (if the kernel heap
104509 + object is otherwise fully initialized) and prevents kernel heap
104510 + overflows during userland to kernel copies.
104511 +
104512 + Note that the current implementation provides the strictest bounds
104513 + checks for the SLUB allocator.
104514 +
104515 + Enabling this option also enables per-slab cache protection against
104516 + data in a given cache being copied into/out of via userland
104517 + accessors. Though the whitelist of regions will be reduced over
104518 + time, it notably protects important data structures like task structs.
104519 +
104520 +
104521 + If frame pointers are enabled on x86, this option will also
104522 + restrict copies into and out of the kernel stack to local variables
104523 + within a single frame.
104524 +
104525 + Since this has a negligible performance impact, you should enable
104526 + this feature.
104527 +
104528 +config PAX_SIZE_OVERFLOW
104529 + bool "Prevent various integer overflows in function size parameters"
104530 + help
104531 + By saying Y here the kernel recomputes expressions of function
104532 + arguments marked by a size_overflow attribute with double integer
104533 + precision (DImode/TImode for 32/64 bit integer types).
104534 +
104535 + The recomputed argument is checked against INT_MAX and an event
104536 + is logged on overflow and the triggering process is killed.
104537 +
104538 + Homepage:
104539 + http://www.grsecurity.net/~ephox/overflow_plugin/
104540 +
104541 +endmenu
104542 +
104543 +endmenu
104544 +
104545 config KEYS
104546 bool "Enable access key retention support"
104547 help
104548 @@ -146,7 +778,7 @@ config INTEL_TXT
104549 config LSM_MMAP_MIN_ADDR
104550 int "Low address space for LSM to protect from user allocation"
104551 depends on SECURITY && SECURITY_SELINUX
104552 - default 65536
104553 + default 32768
104554 help
104555 This is the portion of low virtual memory which should be protected
104556 from userspace allocation. Keeping a user from writing to low pages
104557 diff --git a/security/capability.c b/security/capability.c
104558 index fce07a7..5f12858 100644
104559 --- a/security/capability.c
104560 +++ b/security/capability.c
104561 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *lsmrule)
104562 }
104563 #endif /* CONFIG_AUDIT */
104564
104565 -struct security_operations default_security_ops = {
104566 +struct security_operations default_security_ops __read_only = {
104567 .name = "default",
104568 };
104569
104570 diff --git a/security/commoncap.c b/security/commoncap.c
104571 index fe30751..aaba312 100644
104572 --- a/security/commoncap.c
104573 +++ b/security/commoncap.c
104574 @@ -27,6 +27,8 @@
104575 #include <linux/sched.h>
104576 #include <linux/prctl.h>
104577 #include <linux/securebits.h>
104578 +#include <linux/syslog.h>
104579 +#include <net/sock.h>
104580
104581 /*
104582 * If a non-root user executes a setuid-root binary in
104583 @@ -50,9 +52,18 @@ static void warn_setuid_and_fcaps_mixed(char *fname)
104584 }
104585 }
104586
104587 +#ifdef CONFIG_NET
104588 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
104589 +#endif
104590 +
104591 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
104592 {
104593 +#ifdef CONFIG_NET
104594 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
104595 +#else
104596 NETLINK_CB(skb).eff_cap = current_cap();
104597 +#endif
104598 +
104599 return 0;
104600 }
104601
104602 @@ -582,6 +593,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
104603 {
104604 const struct cred *cred = current_cred();
104605
104606 + if (gr_acl_enable_at_secure())
104607 + return 1;
104608 +
104609 if (cred->uid != 0) {
104610 if (bprm->cap_effective)
104611 return 1;
104612 @@ -956,13 +970,18 @@ error:
104613 /**
104614 * cap_syslog - Determine whether syslog function is permitted
104615 * @type: Function requested
104616 + * @from_file: Whether this request came from an open file (i.e. /proc)
104617 *
104618 * Determine whether the current process is permitted to use a particular
104619 * syslog function, returning 0 if permission is granted, -ve if not.
104620 */
104621 -int cap_syslog(int type)
104622 +int cap_syslog(int type, bool from_file)
104623 {
104624 - if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
104625 + /* /proc/kmsg can open be opened by CAP_SYS_ADMIN */
104626 + if (type != SYSLOG_ACTION_OPEN && from_file)
104627 + return 0;
104628 + if ((type != SYSLOG_ACTION_READ_ALL &&
104629 + type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
104630 return -EPERM;
104631 return 0;
104632 }
104633 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
104634 index 165eb53..b1db4eb 100644
104635 --- a/security/integrity/ima/ima.h
104636 +++ b/security/integrity/ima/ima.h
104637 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
104638 extern spinlock_t ima_queue_lock;
104639
104640 struct ima_h_table {
104641 - atomic_long_t len; /* number of stored measurements in the list */
104642 - atomic_long_t violations;
104643 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
104644 + atomic_long_unchecked_t violations;
104645 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
104646 };
104647 extern struct ima_h_table ima_htable;
104648 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
104649 index 852bf85..35d6df3 100644
104650 --- a/security/integrity/ima/ima_api.c
104651 +++ b/security/integrity/ima/ima_api.c
104652 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
104653 int result;
104654
104655 /* can overflow, only indicator */
104656 - atomic_long_inc(&ima_htable.violations);
104657 + atomic_long_inc_unchecked(&ima_htable.violations);
104658
104659 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
104660 if (!entry) {
104661 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
104662 index 0c72c9c..433e29b 100644
104663 --- a/security/integrity/ima/ima_fs.c
104664 +++ b/security/integrity/ima/ima_fs.c
104665 @@ -27,12 +27,12 @@
104666 static int valid_policy = 1;
104667 #define TMPBUFLEN 12
104668 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
104669 - loff_t *ppos, atomic_long_t *val)
104670 + loff_t *ppos, atomic_long_unchecked_t *val)
104671 {
104672 char tmpbuf[TMPBUFLEN];
104673 ssize_t len;
104674
104675 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
104676 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
104677 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
104678 }
104679
104680 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
104681 index e19316d..339f7ae 100644
104682 --- a/security/integrity/ima/ima_queue.c
104683 +++ b/security/integrity/ima/ima_queue.c
104684 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
104685 INIT_LIST_HEAD(&qe->later);
104686 list_add_tail_rcu(&qe->later, &ima_measurements);
104687
104688 - atomic_long_inc(&ima_htable.len);
104689 + atomic_long_inc_unchecked(&ima_htable.len);
104690 key = ima_hash_key(entry->digest);
104691 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
104692 return 0;
104693 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
104694 index e031952..c9a535d 100644
104695 --- a/security/keys/keyring.c
104696 +++ b/security/keys/keyring.c
104697 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
104698 ret = -EFAULT;
104699
104700 for (loop = 0; loop < klist->nkeys; loop++) {
104701 + key_serial_t serial;
104702 key = klist->keys[loop];
104703 + serial = key->serial;
104704
104705 tmp = sizeof(key_serial_t);
104706 if (tmp > buflen)
104707 tmp = buflen;
104708
104709 - if (copy_to_user(buffer,
104710 - &key->serial,
104711 - tmp) != 0)
104712 + if (copy_to_user(buffer, &serial, tmp))
104713 goto error;
104714
104715 buflen -= tmp;
104716 diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
104717 index 931cfda..e71808a 100644
104718 --- a/security/keys/process_keys.c
104719 +++ b/security/keys/process_keys.c
104720 @@ -208,7 +208,7 @@ static int install_process_keyring(void)
104721 ret = install_process_keyring_to_cred(new);
104722 if (ret < 0) {
104723 abort_creds(new);
104724 - return ret != -EEXIST ?: 0;
104725 + return ret != -EEXIST ? ret : 0;
104726 }
104727
104728 return commit_creds(new);
104729 diff --git a/security/min_addr.c b/security/min_addr.c
104730 index d9f9425..c28cef4 100644
104731 --- a/security/min_addr.c
104732 +++ b/security/min_addr.c
104733 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
104734 */
104735 static void update_mmap_min_addr(void)
104736 {
104737 +#ifndef SPARC
104738 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
104739 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
104740 mmap_min_addr = dac_mmap_min_addr;
104741 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
104742 #else
104743 mmap_min_addr = dac_mmap_min_addr;
104744 #endif
104745 +#endif
104746 }
104747
104748 /*
104749 diff --git a/security/root_plug.c b/security/root_plug.c
104750 index 2f7ffa6..0455400 100644
104751 --- a/security/root_plug.c
104752 +++ b/security/root_plug.c
104753 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security (struct linux_binprm *bprm)
104754 return 0;
104755 }
104756
104757 -static struct security_operations rootplug_security_ops = {
104758 +static struct security_operations rootplug_security_ops __read_only = {
104759 .bprm_check_security = rootplug_bprm_check_security,
104760 };
104761
104762 diff --git a/security/security.c b/security/security.c
104763 index c4c6732..7abf13b 100644
104764 --- a/security/security.c
104765 +++ b/security/security.c
104766 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1];
104767 extern struct security_operations default_security_ops;
104768 extern void security_fixup_ops(struct security_operations *ops);
104769
104770 -struct security_operations *security_ops; /* Initialized to NULL */
104771 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
104772
104773 static inline int verify(struct security_operations *ops)
104774 {
104775 @@ -106,7 +106,7 @@ int __init security_module_enable(struct security_operations *ops)
104776 * If there is already a security module registered with the kernel,
104777 * an error will be returned. Otherwise %0 is returned on success.
104778 */
104779 -int register_security(struct security_operations *ops)
104780 +int __init register_security(struct security_operations *ops)
104781 {
104782 if (verify(ops)) {
104783 printk(KERN_DEBUG "%s could not verify "
104784 @@ -199,9 +199,9 @@ int security_quota_on(struct dentry *dentry)
104785 return security_ops->quota_on(dentry);
104786 }
104787
104788 -int security_syslog(int type)
104789 +int security_syslog(int type, bool from_file)
104790 {
104791 - return security_ops->syslog(type);
104792 + return security_ops->syslog(type, from_file);
104793 }
104794
104795 int security_settime(struct timespec *ts, struct timezone *tz)
104796 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
104797 index a106754..ca3a589 100644
104798 --- a/security/selinux/hooks.c
104799 +++ b/security/selinux/hooks.c
104800 @@ -76,6 +76,7 @@
104801 #include <linux/selinux.h>
104802 #include <linux/mutex.h>
104803 #include <linux/posix-timers.h>
104804 +#include <linux/syslog.h>
104805
104806 #include "avc.h"
104807 #include "objsec.h"
104808 @@ -131,7 +132,7 @@ int selinux_enabled = 1;
104809 * Minimal support for a secondary security module,
104810 * just to allow the use of the capability module.
104811 */
104812 -static struct security_operations *secondary_ops;
104813 +static struct security_operations *secondary_ops __read_only;
104814
104815 /* Lists of inode and superblock security structures initialized
104816 before the policy was loaded. */
104817 @@ -2050,29 +2051,30 @@ static int selinux_quota_on(struct dentry *dentry)
104818 return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON);
104819 }
104820
104821 -static int selinux_syslog(int type)
104822 +static int selinux_syslog(int type, bool from_file)
104823 {
104824 int rc;
104825
104826 - rc = cap_syslog(type);
104827 + rc = cap_syslog(type, from_file);
104828 if (rc)
104829 return rc;
104830
104831 switch (type) {
104832 - case 3: /* Read last kernel messages */
104833 - case 10: /* Return size of the log buffer */
104834 + case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
104835 + case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
104836 rc = task_has_system(current, SYSTEM__SYSLOG_READ);
104837 break;
104838 - case 6: /* Disable logging to console */
104839 - case 7: /* Enable logging to console */
104840 - case 8: /* Set level of messages printed to console */
104841 + case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
104842 + case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
104843 + /* Set level of messages printed to console */
104844 + case SYSLOG_ACTION_CONSOLE_LEVEL:
104845 rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
104846 break;
104847 - case 0: /* Close log */
104848 - case 1: /* Open log */
104849 - case 2: /* Read from log */
104850 - case 4: /* Read/clear last kernel messages */
104851 - case 5: /* Clear ring buffer */
104852 + case SYSLOG_ACTION_CLOSE: /* Close log */
104853 + case SYSLOG_ACTION_OPEN: /* Open log */
104854 + case SYSLOG_ACTION_READ: /* Read from log */
104855 + case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */
104856 + case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
104857 default:
104858 rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
104859 break;
104860 @@ -5457,7 +5459,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
104861
104862 #endif
104863
104864 -static struct security_operations selinux_ops = {
104865 +static struct security_operations selinux_ops __read_only = {
104866 .name = "selinux",
104867
104868 .ptrace_access_check = selinux_ptrace_access_check,
104869 @@ -5841,7 +5843,9 @@ int selinux_disable(void)
104870 avc_disable();
104871
104872 /* Reset security_ops to the secondary module, dummy or capability. */
104873 + pax_open_kernel();
104874 security_ops = secondary_ops;
104875 + pax_close_kernel();
104876
104877 /* Unregister netfilter hooks. */
104878 selinux_nf_ip_exit();
104879 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
104880 index 13128f9..c23c736 100644
104881 --- a/security/selinux/include/xfrm.h
104882 +++ b/security/selinux/include/xfrm.h
104883 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
104884
104885 static inline void selinux_xfrm_notify_policyload(void)
104886 {
104887 - atomic_inc(&flow_cache_genid);
104888 + atomic_inc_unchecked(&flow_cache_genid);
104889 }
104890 #else
104891 static inline int selinux_xfrm_enabled(void)
104892 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
104893 index ff17820..d68084c 100644
104894 --- a/security/selinux/ss/services.c
104895 +++ b/security/selinux/ss/services.c
104896 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, size_t len)
104897 int rc = 0;
104898 struct policy_file file = { data, len }, *fp = &file;
104899
104900 + pax_track_stack();
104901 +
104902 if (!ss_initialized) {
104903 avtab_cache_init();
104904 if (policydb_read(&policydb, fp)) {
104905 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
104906 index c33b6bb..b51f19e 100644
104907 --- a/security/smack/smack_lsm.c
104908 +++ b/security/smack/smack_lsm.c
104909 @@ -157,12 +157,12 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
104910 *
104911 * Returns 0 on success, error code otherwise.
104912 */
104913 -static int smack_syslog(int type)
104914 +static int smack_syslog(int type, bool from_file)
104915 {
104916 int rc;
104917 char *sp = current_security();
104918
104919 - rc = cap_syslog(type);
104920 + rc = cap_syslog(type, from_file);
104921 if (rc != 0)
104922 return rc;
104923
104924 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
104925 return 0;
104926 }
104927
104928 -struct security_operations smack_ops = {
104929 +struct security_operations smack_ops __read_only = {
104930 .name = "smack",
104931
104932 .ptrace_access_check = smack_ptrace_access_check,
104933 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
104934 index 9548a09..9a5f384 100644
104935 --- a/security/tomoyo/tomoyo.c
104936 +++ b/security/tomoyo/tomoyo.c
104937 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
104938 * tomoyo_security_ops is a "struct security_operations" which is used for
104939 * registering TOMOYO.
104940 */
104941 -static struct security_operations tomoyo_security_ops = {
104942 +static struct security_operations tomoyo_security_ops __read_only = {
104943 .name = "tomoyo",
104944 .cred_alloc_blank = tomoyo_cred_alloc_blank,
104945 .cred_prepare = tomoyo_cred_prepare,
104946 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
104947 index 84bb07d..c2ab6b6 100644
104948 --- a/sound/aoa/codecs/onyx.c
104949 +++ b/sound/aoa/codecs/onyx.c
104950 @@ -53,7 +53,7 @@ struct onyx {
104951 spdif_locked:1,
104952 analog_locked:1,
104953 original_mute:2;
104954 - int open_count;
104955 + local_t open_count;
104956 struct codec_info *codec_info;
104957
104958 /* mutex serializes concurrent access to the device
104959 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_item *cii,
104960 struct onyx *onyx = cii->codec_data;
104961
104962 mutex_lock(&onyx->mutex);
104963 - onyx->open_count++;
104964 + local_inc(&onyx->open_count);
104965 mutex_unlock(&onyx->mutex);
104966
104967 return 0;
104968 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_item *cii,
104969 struct onyx *onyx = cii->codec_data;
104970
104971 mutex_lock(&onyx->mutex);
104972 - onyx->open_count--;
104973 - if (!onyx->open_count)
104974 + if (local_dec_and_test(&onyx->open_count))
104975 onyx->spdif_locked = onyx->analog_locked = 0;
104976 mutex_unlock(&onyx->mutex);
104977
104978 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
104979 index ffd2025..df062c9 100644
104980 --- a/sound/aoa/codecs/onyx.h
104981 +++ b/sound/aoa/codecs/onyx.h
104982 @@ -11,6 +11,7 @@
104983 #include <linux/i2c.h>
104984 #include <asm/pmac_low_i2c.h>
104985 #include <asm/prom.h>
104986 +#include <asm/local.h>
104987
104988 /* PCM3052 register definitions */
104989
104990 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
104991 index d9c9635..bc0a5a2 100644
104992 --- a/sound/core/oss/pcm_oss.c
104993 +++ b/sound/core/oss/pcm_oss.c
104994 @@ -1395,7 +1395,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
104995 }
104996 } else {
104997 tmp = snd_pcm_oss_write2(substream,
104998 - (const char __force *)buf,
104999 + (const char __force_kernel *)buf,
105000 runtime->oss.period_bytes, 0);
105001 if (tmp <= 0)
105002 goto err;
105003 @@ -1483,7 +1483,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
105004 xfer += tmp;
105005 runtime->oss.buffer_used -= tmp;
105006 } else {
105007 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
105008 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
105009 runtime->oss.period_bytes, 0);
105010 if (tmp <= 0)
105011 goto err;
105012 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
105013 index 038232d..7dd9e5c 100644
105014 --- a/sound/core/pcm_compat.c
105015 +++ b/sound/core/pcm_compat.c
105016 @@ -30,7 +30,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
105017 int err;
105018
105019 fs = snd_enter_user();
105020 - err = snd_pcm_delay(substream, &delay);
105021 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
105022 snd_leave_user(fs);
105023 if (err < 0)
105024 return err;
105025 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
105026 index e6d2d97..4843949 100644
105027 --- a/sound/core/pcm_native.c
105028 +++ b/sound/core/pcm_native.c
105029 @@ -2747,11 +2747,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
105030 switch (substream->stream) {
105031 case SNDRV_PCM_STREAM_PLAYBACK:
105032 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
105033 - (void __user *)arg);
105034 + (void __force_user *)arg);
105035 break;
105036 case SNDRV_PCM_STREAM_CAPTURE:
105037 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
105038 - (void __user *)arg);
105039 + (void __force_user *)arg);
105040 break;
105041 default:
105042 result = -EINVAL;
105043 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
105044 index 1f99767..14636533 100644
105045 --- a/sound/core/seq/seq_device.c
105046 +++ b/sound/core/seq/seq_device.c
105047 @@ -63,7 +63,7 @@ struct ops_list {
105048 int argsize; /* argument size */
105049
105050 /* operators */
105051 - struct snd_seq_dev_ops ops;
105052 + struct snd_seq_dev_ops *ops;
105053
105054 /* registred devices */
105055 struct list_head dev_list; /* list of devices */
105056 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
105057
105058 mutex_lock(&ops->reg_mutex);
105059 /* copy driver operators */
105060 - ops->ops = *entry;
105061 + ops->ops = entry;
105062 ops->driver |= DRIVER_LOADED;
105063 ops->argsize = argsize;
105064
105065 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
105066 dev->name, ops->id, ops->argsize, dev->argsize);
105067 return -EINVAL;
105068 }
105069 - if (ops->ops.init_device(dev) >= 0) {
105070 + if (ops->ops->init_device(dev) >= 0) {
105071 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
105072 ops->num_init_devices++;
105073 } else {
105074 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
105075 dev->name, ops->id, ops->argsize, dev->argsize);
105076 return -EINVAL;
105077 }
105078 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
105079 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
105080 dev->status = SNDRV_SEQ_DEVICE_FREE;
105081 dev->driver_data = NULL;
105082 ops->num_init_devices--;
105083 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
105084 index 9284829..ac8e8b2 100644
105085 --- a/sound/drivers/mts64.c
105086 +++ b/sound/drivers/mts64.c
105087 @@ -27,6 +27,7 @@
105088 #include <sound/initval.h>
105089 #include <sound/rawmidi.h>
105090 #include <sound/control.h>
105091 +#include <asm/local.h>
105092
105093 #define CARD_NAME "Miditerminal 4140"
105094 #define DRIVER_NAME "MTS64"
105095 @@ -65,7 +66,7 @@ struct mts64 {
105096 struct pardevice *pardev;
105097 int pardev_claimed;
105098
105099 - int open_count;
105100 + local_t open_count;
105101 int current_midi_output_port;
105102 int current_midi_input_port;
105103 u8 mode[MTS64_NUM_INPUT_PORTS];
105104 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
105105 {
105106 struct mts64 *mts = substream->rmidi->private_data;
105107
105108 - if (mts->open_count == 0) {
105109 + if (local_read(&mts->open_count) == 0) {
105110 /* We don't need a spinlock here, because this is just called
105111 if the device has not been opened before.
105112 So there aren't any IRQs from the device */
105113 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
105114
105115 msleep(50);
105116 }
105117 - ++(mts->open_count);
105118 + local_inc(&mts->open_count);
105119
105120 return 0;
105121 }
105122 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
105123 struct mts64 *mts = substream->rmidi->private_data;
105124 unsigned long flags;
105125
105126 - --(mts->open_count);
105127 - if (mts->open_count == 0) {
105128 + if (local_dec_return(&mts->open_count) == 0) {
105129 /* We need the spinlock_irqsave here because we can still
105130 have IRQs at this point */
105131 spin_lock_irqsave(&mts->lock, flags);
105132 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
105133
105134 msleep(500);
105135
105136 - } else if (mts->open_count < 0)
105137 - mts->open_count = 0;
105138 + } else if (local_read(&mts->open_count) < 0)
105139 + local_set(&mts->open_count, 0);
105140
105141 return 0;
105142 }
105143 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
105144 index 01997f2..cbc1195 100644
105145 --- a/sound/drivers/opl4/opl4_lib.c
105146 +++ b/sound/drivers/opl4/opl4_lib.c
105147 @@ -27,7 +27,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
105148 MODULE_DESCRIPTION("OPL4 driver");
105149 MODULE_LICENSE("GPL");
105150
105151 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
105152 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
105153 {
105154 int timeout = 10;
105155 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
105156 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
105157 index 60158e2..0a0cc1a 100644
105158 --- a/sound/drivers/portman2x4.c
105159 +++ b/sound/drivers/portman2x4.c
105160 @@ -46,6 +46,7 @@
105161 #include <sound/initval.h>
105162 #include <sound/rawmidi.h>
105163 #include <sound/control.h>
105164 +#include <asm/local.h>
105165
105166 #define CARD_NAME "Portman 2x4"
105167 #define DRIVER_NAME "portman"
105168 @@ -83,7 +84,7 @@ struct portman {
105169 struct pardevice *pardev;
105170 int pardev_claimed;
105171
105172 - int open_count;
105173 + local_t open_count;
105174 int mode[PORTMAN_NUM_INPUT_PORTS];
105175 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
105176 };
105177 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
105178 index 02f79d2..8691d43 100644
105179 --- a/sound/isa/cmi8330.c
105180 +++ b/sound/isa/cmi8330.c
105181 @@ -173,7 +173,7 @@ struct snd_cmi8330 {
105182
105183 struct snd_pcm *pcm;
105184 struct snd_cmi8330_stream {
105185 - struct snd_pcm_ops ops;
105186 + snd_pcm_ops_no_const ops;
105187 snd_pcm_open_callback_t open;
105188 void *private_data; /* sb or wss */
105189 } streams[2];
105190 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
105191 index 733b014..56ce96f 100644
105192 --- a/sound/oss/sb_audio.c
105193 +++ b/sound/oss/sb_audio.c
105194 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
105195 buf16 = (signed short *)(localbuf + localoffs);
105196 while (c)
105197 {
105198 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
105199 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
105200 if (copy_from_user(lbuf8,
105201 userbuf+useroffs + p,
105202 locallen))
105203 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
105204 index 3136c88..28ad950 100644
105205 --- a/sound/oss/swarm_cs4297a.c
105206 +++ b/sound/oss/swarm_cs4297a.c
105207 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
105208 {
105209 struct cs4297a_state *s;
105210 u32 pwr, id;
105211 - mm_segment_t fs;
105212 int rval;
105213 #ifndef CONFIG_BCM_CS4297A_CSWARM
105214 u64 cfg;
105215 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
105216 if (!rval) {
105217 char *sb1250_duart_present;
105218
105219 +#if 0
105220 + mm_segment_t fs;
105221 fs = get_fs();
105222 set_fs(KERNEL_DS);
105223 -#if 0
105224 val = SOUND_MASK_LINE;
105225 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
105226 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
105227 val = initvol[i].vol;
105228 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
105229 }
105230 + set_fs(fs);
105231 // cs4297a_write_ac97(s, 0x18, 0x0808);
105232 #else
105233 // cs4297a_write_ac97(s, 0x5e, 0x180);
105234 cs4297a_write_ac97(s, 0x02, 0x0808);
105235 cs4297a_write_ac97(s, 0x18, 0x0808);
105236 #endif
105237 - set_fs(fs);
105238
105239 list_add(&s->list, &cs4297a_devs);
105240
105241 diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
105242 index 78288db..0406809 100644
105243 --- a/sound/pci/ac97/ac97_codec.c
105244 +++ b/sound/pci/ac97/ac97_codec.c
105245 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struct snd_device *device)
105246 }
105247
105248 /* build_ops to do nothing */
105249 -static struct snd_ac97_build_ops null_build_ops;
105250 +static const struct snd_ac97_build_ops null_build_ops;
105251
105252 #ifdef CONFIG_SND_AC97_POWER_SAVE
105253 static void do_update_power(struct work_struct *work)
105254 diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
105255 index eeb2e23..82bf625 100644
105256 --- a/sound/pci/ac97/ac97_patch.c
105257 +++ b/sound/pci/ac97/ac97_patch.c
105258 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spdif(struct snd_ac97 *ac97)
105259 return 0;
105260 }
105261
105262 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
105263 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
105264 .build_spdif = patch_yamaha_ymf743_build_spdif,
105265 .build_3d = patch_yamaha_ymf7x3_3d,
105266 };
105267 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdif(struct snd_ac97 * ac97)
105268 return 0;
105269 }
105270
105271 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
105272 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
105273 .build_3d = patch_yamaha_ymf7x3_3d,
105274 .build_post_spdif = patch_yamaha_ymf753_post_spdif
105275 };
105276 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific(struct snd_ac97 * ac97)
105277 return 0;
105278 }
105279
105280 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
105281 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
105282 .build_specific = patch_wolfson_wm9703_specific,
105283 };
105284
105285 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific(struct snd_ac97 * ac97)
105286 return 0;
105287 }
105288
105289 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
105290 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
105291 .build_specific = patch_wolfson_wm9704_specific,
105292 };
105293
105294 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific(struct snd_ac97 * ac97)
105295 return 0;
105296 }
105297
105298 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
105299 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
105300 .build_specific = patch_wolfson_wm9705_specific,
105301 };
105302
105303 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific(struct snd_ac97 * ac97)
105304 return 0;
105305 }
105306
105307 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
105308 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
105309 .build_specific = patch_wolfson_wm9711_specific,
105310 };
105311
105312 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume (struct snd_ac97 * ac97)
105313 }
105314 #endif
105315
105316 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
105317 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
105318 .build_specific = patch_wolfson_wm9713_specific,
105319 .build_3d = patch_wolfson_wm9713_3d,
105320 #ifdef CONFIG_PM
105321 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_specific(struct snd_ac97 * ac97)
105322 return 0;
105323 }
105324
105325 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
105326 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
105327 .build_3d = patch_sigmatel_stac9700_3d,
105328 .build_specific = patch_sigmatel_stac97xx_specific
105329 };
105330 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_specific(struct snd_ac97 *ac97)
105331 return patch_sigmatel_stac97xx_specific(ac97);
105332 }
105333
105334 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
105335 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
105336 .build_3d = patch_sigmatel_stac9708_3d,
105337 .build_specific = patch_sigmatel_stac9708_specific
105338 };
105339 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_specific(struct snd_ac97 *ac97)
105340 return 0;
105341 }
105342
105343 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
105344 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
105345 .build_3d = patch_sigmatel_stac9700_3d,
105346 .build_specific = patch_sigmatel_stac9758_specific
105347 };
105348 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(struct snd_ac97 * ac97)
105349 return 0;
105350 }
105351
105352 -static struct snd_ac97_build_ops patch_cirrus_ops = {
105353 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
105354 .build_spdif = patch_cirrus_build_spdif
105355 };
105356
105357 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(struct snd_ac97 * ac97)
105358 return 0;
105359 }
105360
105361 -static struct snd_ac97_build_ops patch_conexant_ops = {
105362 +static const struct snd_ac97_build_ops patch_conexant_ops = {
105363 .build_spdif = patch_conexant_build_spdif
105364 };
105365
105366 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct snd_ac97 * ac97, int unchained_idx, int
105367 }
105368 }
105369
105370 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
105371 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
105372 #ifdef CONFIG_PM
105373 .resume = ad18xx_resume
105374 #endif
105375 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct snd_ac97 * ac97)
105376 return 0;
105377 }
105378
105379 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
105380 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
105381 .build_specific = &patch_ad1885_specific,
105382 #ifdef CONFIG_PM
105383 .resume = ad18xx_resume
105384 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct snd_ac97 * ac97)
105385 return 0;
105386 }
105387
105388 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
105389 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
105390 .build_specific = &patch_ad1886_specific,
105391 #ifdef CONFIG_PM
105392 .resume = ad18xx_resume
105393 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct snd_ac97 * ac97)
105394 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
105395 }
105396
105397 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
105398 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
105399 .build_post_spdif = patch_ad198x_post_spdif,
105400 .build_specific = patch_ad1981a_specific,
105401 #ifdef CONFIG_PM
105402 @@ -1952,7 +1952,7 @@ static int patch_ad1981b_specific(struct snd_ac97 *ac97)
105403 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
105404 }
105405
105406 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
105407 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
105408 .build_post_spdif = patch_ad198x_post_spdif,
105409 .build_specific = patch_ad1981b_specific,
105410 #ifdef CONFIG_PM
105411 @@ -2091,7 +2091,7 @@ static int patch_ad1888_specific(struct snd_ac97 *ac97)
105412 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
105413 }
105414
105415 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
105416 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
105417 .build_post_spdif = patch_ad198x_post_spdif,
105418 .build_specific = patch_ad1888_specific,
105419 #ifdef CONFIG_PM
105420 @@ -2140,7 +2140,7 @@ static int patch_ad1980_specific(struct snd_ac97 *ac97)
105421 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
105422 }
105423
105424 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
105425 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
105426 .build_post_spdif = patch_ad198x_post_spdif,
105427 .build_specific = patch_ad1980_specific,
105428 #ifdef CONFIG_PM
105429 @@ -2255,7 +2255,7 @@ static int patch_ad1985_specific(struct snd_ac97 *ac97)
105430 ARRAY_SIZE(snd_ac97_ad1985_controls));
105431 }
105432
105433 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
105434 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
105435 .build_post_spdif = patch_ad198x_post_spdif,
105436 .build_specific = patch_ad1985_specific,
105437 #ifdef CONFIG_PM
105438 @@ -2547,7 +2547,7 @@ static int patch_ad1986_specific(struct snd_ac97 *ac97)
105439 ARRAY_SIZE(snd_ac97_ad1985_controls));
105440 }
105441
105442 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
105443 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
105444 .build_post_spdif = patch_ad198x_post_spdif,
105445 .build_specific = patch_ad1986_specific,
105446 #ifdef CONFIG_PM
105447 @@ -2652,7 +2652,7 @@ static int patch_alc650_specific(struct snd_ac97 * ac97)
105448 return 0;
105449 }
105450
105451 -static struct snd_ac97_build_ops patch_alc650_ops = {
105452 +static const struct snd_ac97_build_ops patch_alc650_ops = {
105453 .build_specific = patch_alc650_specific,
105454 .update_jacks = alc650_update_jacks
105455 };
105456 @@ -2804,7 +2804,7 @@ static int patch_alc655_specific(struct snd_ac97 * ac97)
105457 return 0;
105458 }
105459
105460 -static struct snd_ac97_build_ops patch_alc655_ops = {
105461 +static const struct snd_ac97_build_ops patch_alc655_ops = {
105462 .build_specific = patch_alc655_specific,
105463 .update_jacks = alc655_update_jacks
105464 };
105465 @@ -2916,7 +2916,7 @@ static int patch_alc850_specific(struct snd_ac97 *ac97)
105466 return 0;
105467 }
105468
105469 -static struct snd_ac97_build_ops patch_alc850_ops = {
105470 +static const struct snd_ac97_build_ops patch_alc850_ops = {
105471 .build_specific = patch_alc850_specific,
105472 .update_jacks = alc850_update_jacks
105473 };
105474 @@ -2978,7 +2978,7 @@ static int patch_cm9738_specific(struct snd_ac97 * ac97)
105475 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
105476 }
105477
105478 -static struct snd_ac97_build_ops patch_cm9738_ops = {
105479 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
105480 .build_specific = patch_cm9738_specific,
105481 .update_jacks = cm9738_update_jacks
105482 };
105483 @@ -3069,7 +3069,7 @@ static int patch_cm9739_post_spdif(struct snd_ac97 * ac97)
105484 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
105485 }
105486
105487 -static struct snd_ac97_build_ops patch_cm9739_ops = {
105488 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
105489 .build_specific = patch_cm9739_specific,
105490 .build_post_spdif = patch_cm9739_post_spdif,
105491 .update_jacks = cm9739_update_jacks
105492 @@ -3243,7 +3243,7 @@ static int patch_cm9761_specific(struct snd_ac97 * ac97)
105493 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
105494 }
105495
105496 -static struct snd_ac97_build_ops patch_cm9761_ops = {
105497 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
105498 .build_specific = patch_cm9761_specific,
105499 .build_post_spdif = patch_cm9761_post_spdif,
105500 .update_jacks = cm9761_update_jacks
105501 @@ -3339,7 +3339,7 @@ static int patch_cm9780_specific(struct snd_ac97 *ac97)
105502 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
105503 }
105504
105505 -static struct snd_ac97_build_ops patch_cm9780_ops = {
105506 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
105507 .build_specific = patch_cm9780_specific,
105508 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
105509 };
105510 @@ -3459,7 +3459,7 @@ static int patch_vt1616_specific(struct snd_ac97 * ac97)
105511 return 0;
105512 }
105513
105514 -static struct snd_ac97_build_ops patch_vt1616_ops = {
105515 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
105516 .build_specific = patch_vt1616_specific
105517 };
105518
105519 @@ -3813,7 +3813,7 @@ static int patch_it2646_specific(struct snd_ac97 * ac97)
105520 return 0;
105521 }
105522
105523 -static struct snd_ac97_build_ops patch_it2646_ops = {
105524 +static const struct snd_ac97_build_ops patch_it2646_ops = {
105525 .build_specific = patch_it2646_specific,
105526 .update_jacks = it2646_update_jacks
105527 };
105528 @@ -3847,7 +3847,7 @@ static int patch_si3036_specific(struct snd_ac97 * ac97)
105529 return 0;
105530 }
105531
105532 -static struct snd_ac97_build_ops patch_si3036_ops = {
105533 +static const struct snd_ac97_build_ops patch_si3036_ops = {
105534 .build_specific = patch_si3036_specific,
105535 };
105536
105537 @@ -3914,7 +3914,7 @@ static int patch_ucb1400_specific(struct snd_ac97 * ac97)
105538 return 0;
105539 }
105540
105541 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
105542 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
105543 .build_specific = patch_ucb1400_specific,
105544 };
105545
105546 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
105547 index 99552fb..4dcc2c5 100644
105548 --- a/sound/pci/hda/hda_codec.h
105549 +++ b/sound/pci/hda/hda_codec.h
105550 @@ -580,7 +580,7 @@ struct hda_bus_ops {
105551 /* notify power-up/down from codec to controller */
105552 void (*pm_notify)(struct hda_bus *bus);
105553 #endif
105554 -};
105555 +} __no_const;
105556
105557 /* template to pass to the bus constructor */
105558 struct hda_bus_template {
105559 @@ -675,6 +675,7 @@ struct hda_codec_ops {
105560 int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid);
105561 #endif
105562 };
105563 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
105564
105565 /* record for amp information cache */
105566 struct hda_cache_head {
105567 @@ -705,7 +706,7 @@ struct hda_pcm_ops {
105568 struct snd_pcm_substream *substream);
105569 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
105570 struct snd_pcm_substream *substream);
105571 -};
105572 +} __no_const;
105573
105574 /* PCM information for each substream */
105575 struct hda_pcm_stream {
105576 @@ -760,7 +761,7 @@ struct hda_codec {
105577 const char *modelname; /* model name for preset */
105578
105579 /* set by patch */
105580 - struct hda_codec_ops patch_ops;
105581 + hda_codec_ops_no_const patch_ops;
105582
105583 /* PCM to create, set by patch_ops.build_pcms callback */
105584 unsigned int num_pcms;
105585 diff --git a/sound/pci/hda/patch_atihdmi.c b/sound/pci/hda/patch_atihdmi.c
105586 index fb684f0..2b11cea 100644
105587 --- a/sound/pci/hda/patch_atihdmi.c
105588 +++ b/sound/pci/hda/patch_atihdmi.c
105589 @@ -177,7 +177,7 @@ static int patch_atihdmi(struct hda_codec *codec)
105590 */
105591 spec->multiout.dig_out_nid = CVT_NID;
105592
105593 - codec->patch_ops = atihdmi_patch_ops;
105594 + memcpy((void *)&codec->patch_ops, &atihdmi_patch_ops, sizeof(atihdmi_patch_ops));
105595
105596 return 0;
105597 }
105598 diff --git a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c
105599 index 7c23016..c5bfdd7 100644
105600 --- a/sound/pci/hda/patch_intelhdmi.c
105601 +++ b/sound/pci/hda/patch_intelhdmi.c
105602 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
105603 cp_ready);
105604
105605 /* TODO */
105606 - if (cp_state)
105607 - ;
105608 - if (cp_ready)
105609 - ;
105610 + if (cp_state) {
105611 + }
105612 + if (cp_ready) {
105613 + }
105614 }
105615
105616
105617 @@ -656,7 +656,7 @@ static int do_patch_intel_hdmi(struct hda_codec *codec)
105618 spec->multiout.dig_out_nid = cvt_nid;
105619
105620 codec->spec = spec;
105621 - codec->patch_ops = intel_hdmi_patch_ops;
105622 + memcpy((void *)&codec->patch_ops, &intel_hdmi_patch_ops, sizeof(intel_hdmi_patch_ops));
105623
105624 snd_hda_eld_proc_new(codec, &spec->sink_eld);
105625
105626 diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
105627 index 6afdab0..68ed352 100644
105628 --- a/sound/pci/hda/patch_nvhdmi.c
105629 +++ b/sound/pci/hda/patch_nvhdmi.c
105630 @@ -367,7 +367,7 @@ static int patch_nvhdmi_8ch(struct hda_codec *codec)
105631 spec->multiout.max_channels = 8;
105632 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
105633
105634 - codec->patch_ops = nvhdmi_patch_ops_8ch;
105635 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_8ch, sizeof(nvhdmi_patch_ops_8ch));
105636
105637 return 0;
105638 }
105639 @@ -386,7 +386,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
105640 spec->multiout.max_channels = 2;
105641 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
105642
105643 - codec->patch_ops = nvhdmi_patch_ops_2ch;
105644 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_2ch, sizeof(nvhdmi_patch_ops_2ch));
105645
105646 return 0;
105647 }
105648 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
105649 index 2fcd70d..a143eaf 100644
105650 --- a/sound/pci/hda/patch_sigmatel.c
105651 +++ b/sound/pci/hda/patch_sigmatel.c
105652 @@ -5220,7 +5220,7 @@ again:
105653 snd_hda_codec_write_cache(codec, nid, 0,
105654 AC_VERB_SET_CONNECT_SEL, num_dacs);
105655
105656 - codec->patch_ops = stac92xx_patch_ops;
105657 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
105658
105659 codec->proc_widget_hook = stac92hd_proc_hook;
105660
105661 @@ -5294,7 +5294,7 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
105662 return -ENOMEM;
105663
105664 codec->spec = spec;
105665 - codec->patch_ops = stac92xx_patch_ops;
105666 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
105667 spec->num_pins = STAC92HD71BXX_NUM_PINS;
105668 switch (codec->vendor_id) {
105669 case 0x111d76b6:
105670 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
105671 index d063149..01599a4 100644
105672 --- a/sound/pci/ice1712/ice1712.h
105673 +++ b/sound/pci/ice1712/ice1712.h
105674 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
105675 unsigned int mask_flags; /* total mask bits */
105676 struct snd_akm4xxx_ops {
105677 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
105678 - } ops;
105679 + } __no_const ops;
105680 };
105681
105682 struct snd_ice1712_spdif {
105683 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
105684 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
105685 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
105686 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
105687 - } ops;
105688 + } __no_const ops;
105689 };
105690
105691
105692 diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
105693 index 9e7d12e..3e3bc64 100644
105694 --- a/sound/pci/intel8x0m.c
105695 +++ b/sound/pci/intel8x0m.c
105696 @@ -1264,7 +1264,7 @@ static struct shortname_table {
105697 { 0x5455, "ALi M5455" },
105698 { 0x746d, "AMD AMD8111" },
105699 #endif
105700 - { 0 },
105701 + { 0, },
105702 };
105703
105704 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
105705 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
105706 index 5518371..45cf7ac 100644
105707 --- a/sound/pci/ymfpci/ymfpci_main.c
105708 +++ b/sound/pci/ymfpci/ymfpci_main.c
105709 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
105710 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
105711 break;
105712 }
105713 - if (atomic_read(&chip->interrupt_sleep_count)) {
105714 - atomic_set(&chip->interrupt_sleep_count, 0);
105715 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
105716 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
105717 wake_up(&chip->interrupt_sleep);
105718 }
105719 __end:
105720 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
105721 continue;
105722 init_waitqueue_entry(&wait, current);
105723 add_wait_queue(&chip->interrupt_sleep, &wait);
105724 - atomic_inc(&chip->interrupt_sleep_count);
105725 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
105726 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
105727 remove_wait_queue(&chip->interrupt_sleep, &wait);
105728 }
105729 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
105730 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
105731 spin_unlock(&chip->reg_lock);
105732
105733 - if (atomic_read(&chip->interrupt_sleep_count)) {
105734 - atomic_set(&chip->interrupt_sleep_count, 0);
105735 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
105736 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
105737 wake_up(&chip->interrupt_sleep);
105738 }
105739 }
105740 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
105741 spin_lock_init(&chip->reg_lock);
105742 spin_lock_init(&chip->voice_lock);
105743 init_waitqueue_head(&chip->interrupt_sleep);
105744 - atomic_set(&chip->interrupt_sleep_count, 0);
105745 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
105746 chip->card = card;
105747 chip->pci = pci;
105748 chip->irq = -1;
105749 diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
105750 index 0a1b2f6..776bb19 100644
105751 --- a/sound/soc/soc-core.c
105752 +++ b/sound/soc/soc-core.c
105753 @@ -609,7 +609,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
105754 }
105755
105756 /* ASoC PCM operations */
105757 -static struct snd_pcm_ops soc_pcm_ops = {
105758 +static snd_pcm_ops_no_const soc_pcm_ops = {
105759 .open = soc_pcm_open,
105760 .close = soc_codec_close,
105761 .hw_params = soc_pcm_hw_params,
105762 diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
105763 index 79633ea..9732e90 100644
105764 --- a/sound/usb/usbaudio.c
105765 +++ b/sound/usb/usbaudio.c
105766 @@ -963,12 +963,12 @@ static int snd_usb_pcm_playback_trigger(struct snd_pcm_substream *substream,
105767 switch (cmd) {
105768 case SNDRV_PCM_TRIGGER_START:
105769 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
105770 - subs->ops.prepare = prepare_playback_urb;
105771 + *(void **)&subs->ops.prepare = prepare_playback_urb;
105772 return 0;
105773 case SNDRV_PCM_TRIGGER_STOP:
105774 return deactivate_urbs(subs, 0, 0);
105775 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
105776 - subs->ops.prepare = prepare_nodata_playback_urb;
105777 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
105778 return 0;
105779 default:
105780 return -EINVAL;
105781 @@ -985,15 +985,15 @@ static int snd_usb_pcm_capture_trigger(struct snd_pcm_substream *substream,
105782
105783 switch (cmd) {
105784 case SNDRV_PCM_TRIGGER_START:
105785 - subs->ops.retire = retire_capture_urb;
105786 + *(void **)&subs->ops.retire = retire_capture_urb;
105787 return start_urbs(subs, substream->runtime);
105788 case SNDRV_PCM_TRIGGER_STOP:
105789 return deactivate_urbs(subs, 0, 0);
105790 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
105791 - subs->ops.retire = retire_paused_capture_urb;
105792 + *(void **)&subs->ops.retire = retire_paused_capture_urb;
105793 return 0;
105794 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
105795 - subs->ops.retire = retire_capture_urb;
105796 + *(void **)&subs->ops.retire = retire_capture_urb;
105797 return 0;
105798 default:
105799 return -EINVAL;
105800 @@ -1542,7 +1542,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
105801 /* for playback, submit the URBs now; otherwise, the first hwptr_done
105802 * updates for all URBs would happen at the same time when starting */
105803 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
105804 - subs->ops.prepare = prepare_nodata_playback_urb;
105805 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
105806 return start_urbs(subs, runtime);
105807 } else
105808 return 0;
105809 @@ -2228,14 +2228,14 @@ static void init_substream(struct snd_usb_stream *as, int stream, struct audiofo
105810 subs->direction = stream;
105811 subs->dev = as->chip->dev;
105812 if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
105813 - subs->ops = audio_urb_ops[stream];
105814 + memcpy((void *)&subs->ops, &audio_urb_ops[stream], sizeof(subs->ops));
105815 } else {
105816 - subs->ops = audio_urb_ops_high_speed[stream];
105817 + memcpy((void *)&subs->ops, &audio_urb_ops_high_speed[stream], sizeof(subs->ops));
105818 switch (as->chip->usb_id) {
105819 case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
105820 case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
105821 case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
105822 - subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
105823 + *(void **)&subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
105824 break;
105825 }
105826 }
105827 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
105828 new file mode 100644
105829 index 0000000..ca64170
105830 --- /dev/null
105831 +++ b/tools/gcc/Makefile
105832 @@ -0,0 +1,26 @@
105833 +#CC := gcc
105834 +#PLUGIN_SOURCE_FILES := pax_plugin.c
105835 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
105836 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
105837 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
105838 +
105839 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
105840 +CFLAGS_size_overflow_plugin.o := -Wno-missing-initializer
105841 +
105842 +hostlibs-y := constify_plugin.so
105843 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
105844 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
105845 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
105846 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
105847 +hostlibs-y += colorize_plugin.so
105848 +hostlibs-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
105849 +
105850 +always := $(hostlibs-y)
105851 +
105852 +constify_plugin-objs := constify_plugin.o
105853 +stackleak_plugin-objs := stackleak_plugin.o
105854 +kallocstat_plugin-objs := kallocstat_plugin.o
105855 +kernexec_plugin-objs := kernexec_plugin.o
105856 +checker_plugin-objs := checker_plugin.o
105857 +colorize_plugin-objs := colorize_plugin.o
105858 +size_overflow_plugin-objs := size_overflow_plugin.o
105859 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
105860 new file mode 100644
105861 index 0000000..d41b5af
105862 --- /dev/null
105863 +++ b/tools/gcc/checker_plugin.c
105864 @@ -0,0 +1,171 @@
105865 +/*
105866 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
105867 + * Licensed under the GPL v2
105868 + *
105869 + * Note: the choice of the license means that the compilation process is
105870 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
105871 + * but for the kernel it doesn't matter since it doesn't link against
105872 + * any of the gcc libraries
105873 + *
105874 + * gcc plugin to implement various sparse (source code checker) features
105875 + *
105876 + * TODO:
105877 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
105878 + *
105879 + * BUGS:
105880 + * - none known
105881 + */
105882 +#include "gcc-plugin.h"
105883 +#include "config.h"
105884 +#include "system.h"
105885 +#include "coretypes.h"
105886 +#include "tree.h"
105887 +#include "tree-pass.h"
105888 +#include "flags.h"
105889 +#include "intl.h"
105890 +#include "toplev.h"
105891 +#include "plugin.h"
105892 +//#include "expr.h" where are you...
105893 +#include "diagnostic.h"
105894 +#include "plugin-version.h"
105895 +#include "tm.h"
105896 +#include "function.h"
105897 +#include "basic-block.h"
105898 +#include "gimple.h"
105899 +#include "rtl.h"
105900 +#include "emit-rtl.h"
105901 +#include "tree-flow.h"
105902 +#include "target.h"
105903 +
105904 +extern void c_register_addr_space (const char *str, addr_space_t as);
105905 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
105906 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
105907 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
105908 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
105909 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
105910 +
105911 +extern void print_gimple_stmt(FILE *, gimple, int, int);
105912 +extern rtx emit_move_insn(rtx x, rtx y);
105913 +
105914 +int plugin_is_GPL_compatible;
105915 +
105916 +static struct plugin_info checker_plugin_info = {
105917 + .version = "201111150100",
105918 +};
105919 +
105920 +#define ADDR_SPACE_KERNEL 0
105921 +#define ADDR_SPACE_FORCE_KERNEL 1
105922 +#define ADDR_SPACE_USER 2
105923 +#define ADDR_SPACE_FORCE_USER 3
105924 +#define ADDR_SPACE_IOMEM 0
105925 +#define ADDR_SPACE_FORCE_IOMEM 0
105926 +#define ADDR_SPACE_PERCPU 0
105927 +#define ADDR_SPACE_FORCE_PERCPU 0
105928 +#define ADDR_SPACE_RCU 0
105929 +#define ADDR_SPACE_FORCE_RCU 0
105930 +
105931 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
105932 +{
105933 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
105934 +}
105935 +
105936 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
105937 +{
105938 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
105939 +}
105940 +
105941 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
105942 +{
105943 + return default_addr_space_valid_pointer_mode(mode, as);
105944 +}
105945 +
105946 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
105947 +{
105948 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
105949 +}
105950 +
105951 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
105952 +{
105953 + return default_addr_space_legitimize_address(x, oldx, mode, as);
105954 +}
105955 +
105956 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
105957 +{
105958 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
105959 + return true;
105960 +
105961 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
105962 + return true;
105963 +
105964 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
105965 + return true;
105966 +
105967 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
105968 + return true;
105969 +
105970 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
105971 + return true;
105972 +
105973 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
105974 + return true;
105975 +
105976 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
105977 + return true;
105978 +
105979 + return subset == superset;
105980 +}
105981 +
105982 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
105983 +{
105984 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
105985 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
105986 +
105987 + return op;
105988 +}
105989 +
105990 +static void register_checker_address_spaces(void *event_data, void *data)
105991 +{
105992 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
105993 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
105994 + c_register_addr_space("__user", ADDR_SPACE_USER);
105995 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
105996 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
105997 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
105998 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
105999 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
106000 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
106001 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
106002 +
106003 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
106004 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
106005 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
106006 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
106007 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
106008 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
106009 + targetm.addr_space.convert = checker_addr_space_convert;
106010 +}
106011 +
106012 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
106013 +{
106014 + const char * const plugin_name = plugin_info->base_name;
106015 + const int argc = plugin_info->argc;
106016 + const struct plugin_argument * const argv = plugin_info->argv;
106017 + int i;
106018 +
106019 + if (!plugin_default_version_check(version, &gcc_version)) {
106020 + error(G_("incompatible gcc/plugin versions"));
106021 + return 1;
106022 + }
106023 +
106024 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
106025 +
106026 + for (i = 0; i < argc; ++i)
106027 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
106028 +
106029 + if (TARGET_64BIT == 0)
106030 + return 0;
106031 +
106032 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
106033 +
106034 + return 0;
106035 +}
106036 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
106037 new file mode 100644
106038 index 0000000..ee950d0
106039 --- /dev/null
106040 +++ b/tools/gcc/colorize_plugin.c
106041 @@ -0,0 +1,147 @@
106042 +/*
106043 + * Copyright 2012 by PaX Team <pageexec@freemail.hu>
106044 + * Licensed under the GPL v2
106045 + *
106046 + * Note: the choice of the license means that the compilation process is
106047 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
106048 + * but for the kernel it doesn't matter since it doesn't link against
106049 + * any of the gcc libraries
106050 + *
106051 + * gcc plugin to colorize diagnostic output
106052 + *
106053 + */
106054 +
106055 +#include "gcc-plugin.h"
106056 +#include "config.h"
106057 +#include "system.h"
106058 +#include "coretypes.h"
106059 +#include "tree.h"
106060 +#include "tree-pass.h"
106061 +#include "flags.h"
106062 +#include "intl.h"
106063 +#include "toplev.h"
106064 +#include "plugin.h"
106065 +#include "diagnostic.h"
106066 +#include "plugin-version.h"
106067 +#include "tm.h"
106068 +
106069 +int plugin_is_GPL_compatible;
106070 +
106071 +static struct plugin_info colorize_plugin_info = {
106072 + .version = "201203092200",
106073 +};
106074 +
106075 +#define GREEN "\033[32m\033[2m"
106076 +#define LIGHTGREEN "\033[32m\033[1m"
106077 +#define YELLOW "\033[33m\033[2m"
106078 +#define LIGHTYELLOW "\033[33m\033[1m"
106079 +#define RED "\033[31m\033[2m"
106080 +#define LIGHTRED "\033[31m\033[1m"
106081 +#define BLUE "\033[34m\033[2m"
106082 +#define LIGHTBLUE "\033[34m\033[1m"
106083 +#define BRIGHT "\033[m\033[1m"
106084 +#define NORMAL "\033[m"
106085 +
106086 +static diagnostic_starter_fn old_starter;
106087 +static diagnostic_finalizer_fn old_finalizer;
106088 +
106089 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
106090 +{
106091 + const char *color;
106092 + char *newprefix;
106093 +
106094 + switch (diagnostic->kind) {
106095 + case DK_NOTE:
106096 + color = LIGHTBLUE;
106097 + break;
106098 +
106099 + case DK_PEDWARN:
106100 + case DK_WARNING:
106101 + color = LIGHTYELLOW;
106102 + break;
106103 +
106104 + case DK_ERROR:
106105 + case DK_FATAL:
106106 + case DK_ICE:
106107 + case DK_PERMERROR:
106108 + case DK_SORRY:
106109 + color = LIGHTRED;
106110 + break;
106111 +
106112 + default:
106113 + color = NORMAL;
106114 + }
106115 +
106116 + old_starter(context, diagnostic);
106117 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
106118 + return;
106119 + pp_destroy_prefix(context->printer);
106120 + pp_set_prefix(context->printer, newprefix);
106121 +}
106122 +
106123 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
106124 +{
106125 + old_finalizer(context, diagnostic);
106126 +}
106127 +
106128 +static void colorize_arm(void)
106129 +{
106130 + old_starter = diagnostic_starter(global_dc);
106131 + old_finalizer = diagnostic_finalizer(global_dc);
106132 +
106133 + diagnostic_starter(global_dc) = start_colorize;
106134 + diagnostic_finalizer(global_dc) = finalize_colorize;
106135 +}
106136 +
106137 +static unsigned int execute_colorize_rearm(void)
106138 +{
106139 + if (diagnostic_starter(global_dc) == start_colorize)
106140 + return 0;
106141 +
106142 + colorize_arm();
106143 + return 0;
106144 +}
106145 +
106146 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
106147 + .pass = {
106148 + .type = SIMPLE_IPA_PASS,
106149 + .name = "colorize_rearm",
106150 + .gate = NULL,
106151 + .execute = execute_colorize_rearm,
106152 + .sub = NULL,
106153 + .next = NULL,
106154 + .static_pass_number = 0,
106155 + .tv_id = TV_NONE,
106156 + .properties_required = 0,
106157 + .properties_provided = 0,
106158 + .properties_destroyed = 0,
106159 + .todo_flags_start = 0,
106160 + .todo_flags_finish = 0
106161 + }
106162 +};
106163 +
106164 +static void colorize_start_unit(void *gcc_data, void *user_data)
106165 +{
106166 + colorize_arm();
106167 +}
106168 +
106169 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
106170 +{
106171 + const char * const plugin_name = plugin_info->base_name;
106172 + struct register_pass_info colorize_rearm_pass_info = {
106173 + .pass = &pass_ipa_colorize_rearm.pass,
106174 + .reference_pass_name = "*free_lang_data",
106175 + .ref_pass_instance_number = 0,
106176 + .pos_op = PASS_POS_INSERT_AFTER
106177 + };
106178 +
106179 + if (!plugin_default_version_check(version, &gcc_version)) {
106180 + error(G_("incompatible gcc/plugin versions"));
106181 + return 1;
106182 + }
106183 +
106184 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
106185 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
106186 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
106187 + return 0;
106188 +}
106189 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
106190 new file mode 100644
106191 index 0000000..704a564
106192 --- /dev/null
106193 +++ b/tools/gcc/constify_plugin.c
106194 @@ -0,0 +1,303 @@
106195 +/*
106196 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
106197 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
106198 + * Licensed under the GPL v2, or (at your option) v3
106199 + *
106200 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
106201 + *
106202 + * Homepage:
106203 + * http://www.grsecurity.net/~ephox/const_plugin/
106204 + *
106205 + * Usage:
106206 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
106207 + * $ gcc -fplugin=constify_plugin.so test.c -O2
106208 + */
106209 +
106210 +#include "gcc-plugin.h"
106211 +#include "config.h"
106212 +#include "system.h"
106213 +#include "coretypes.h"
106214 +#include "tree.h"
106215 +#include "tree-pass.h"
106216 +#include "flags.h"
106217 +#include "intl.h"
106218 +#include "toplev.h"
106219 +#include "plugin.h"
106220 +#include "diagnostic.h"
106221 +#include "plugin-version.h"
106222 +#include "tm.h"
106223 +#include "function.h"
106224 +#include "basic-block.h"
106225 +#include "gimple.h"
106226 +#include "rtl.h"
106227 +#include "emit-rtl.h"
106228 +#include "tree-flow.h"
106229 +
106230 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
106231 +
106232 +int plugin_is_GPL_compatible;
106233 +
106234 +static struct plugin_info const_plugin_info = {
106235 + .version = "201111150100",
106236 + .help = "no-constify\tturn off constification\n",
106237 +};
106238 +
106239 +static void constify_type(tree type);
106240 +static bool walk_struct(tree node);
106241 +
106242 +static tree deconstify_type(tree old_type)
106243 +{
106244 + tree new_type, field;
106245 +
106246 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
106247 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
106248 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
106249 + DECL_FIELD_CONTEXT(field) = new_type;
106250 + TYPE_READONLY(new_type) = 0;
106251 + C_TYPE_FIELDS_READONLY(new_type) = 0;
106252 + return new_type;
106253 +}
106254 +
106255 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
106256 +{
106257 + tree type;
106258 +
106259 + *no_add_attrs = true;
106260 + if (TREE_CODE(*node) == FUNCTION_DECL) {
106261 + error("%qE attribute does not apply to functions", name);
106262 + return NULL_TREE;
106263 + }
106264 +
106265 + if (TREE_CODE(*node) == VAR_DECL) {
106266 + error("%qE attribute does not apply to variables", name);
106267 + return NULL_TREE;
106268 + }
106269 +
106270 + if (TYPE_P(*node)) {
106271 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
106272 + *no_add_attrs = false;
106273 + else
106274 + error("%qE attribute applies to struct and union types only", name);
106275 + return NULL_TREE;
106276 + }
106277 +
106278 + type = TREE_TYPE(*node);
106279 +
106280 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
106281 + error("%qE attribute applies to struct and union types only", name);
106282 + return NULL_TREE;
106283 + }
106284 +
106285 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
106286 + error("%qE attribute is already applied to the type", name);
106287 + return NULL_TREE;
106288 + }
106289 +
106290 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
106291 + error("%qE attribute used on type that is not constified", name);
106292 + return NULL_TREE;
106293 + }
106294 +
106295 + if (TREE_CODE(*node) == TYPE_DECL) {
106296 + TREE_TYPE(*node) = deconstify_type(type);
106297 + TREE_READONLY(*node) = 0;
106298 + return NULL_TREE;
106299 + }
106300 +
106301 + return NULL_TREE;
106302 +}
106303 +
106304 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
106305 +{
106306 + *no_add_attrs = true;
106307 + if (!TYPE_P(*node)) {
106308 + error("%qE attribute applies to types only", name);
106309 + return NULL_TREE;
106310 + }
106311 +
106312 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
106313 + error("%qE attribute applies to struct and union types only", name);
106314 + return NULL_TREE;
106315 + }
106316 +
106317 + *no_add_attrs = false;
106318 + constify_type(*node);
106319 + return NULL_TREE;
106320 +}
106321 +
106322 +static struct attribute_spec no_const_attr = {
106323 + .name = "no_const",
106324 + .min_length = 0,
106325 + .max_length = 0,
106326 + .decl_required = false,
106327 + .type_required = false,
106328 + .function_type_required = false,
106329 + .handler = handle_no_const_attribute,
106330 +#if BUILDING_GCC_VERSION >= 4007
106331 + .affects_type_identity = true
106332 +#endif
106333 +};
106334 +
106335 +static struct attribute_spec do_const_attr = {
106336 + .name = "do_const",
106337 + .min_length = 0,
106338 + .max_length = 0,
106339 + .decl_required = false,
106340 + .type_required = false,
106341 + .function_type_required = false,
106342 + .handler = handle_do_const_attribute,
106343 +#if BUILDING_GCC_VERSION >= 4007
106344 + .affects_type_identity = true
106345 +#endif
106346 +};
106347 +
106348 +static void register_attributes(void *event_data, void *data)
106349 +{
106350 + register_attribute(&no_const_attr);
106351 + register_attribute(&do_const_attr);
106352 +}
106353 +
106354 +static void constify_type(tree type)
106355 +{
106356 + TYPE_READONLY(type) = 1;
106357 + C_TYPE_FIELDS_READONLY(type) = 1;
106358 +}
106359 +
106360 +static bool is_fptr(tree field)
106361 +{
106362 + tree ptr = TREE_TYPE(field);
106363 +
106364 + if (TREE_CODE(ptr) != POINTER_TYPE)
106365 + return false;
106366 +
106367 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
106368 +}
106369 +
106370 +static bool walk_struct(tree node)
106371 +{
106372 + tree field;
106373 +
106374 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
106375 + return false;
106376 +
106377 + if (TYPE_FIELDS(node) == NULL_TREE)
106378 + return false;
106379 +
106380 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
106381 + tree type = TREE_TYPE(field);
106382 + enum tree_code code = TREE_CODE(type);
106383 + if (code == RECORD_TYPE || code == UNION_TYPE) {
106384 + if (!(walk_struct(type)))
106385 + return false;
106386 + } else if (!is_fptr(field) && !TREE_READONLY(field))
106387 + return false;
106388 + }
106389 + return true;
106390 +}
106391 +
106392 +static void finish_type(void *event_data, void *data)
106393 +{
106394 + tree type = (tree)event_data;
106395 +
106396 + if (type == NULL_TREE)
106397 + return;
106398 +
106399 + if (TYPE_READONLY(type))
106400 + return;
106401 +
106402 + if (walk_struct(type))
106403 + constify_type(type);
106404 +}
106405 +
106406 +static unsigned int check_local_variables(void);
106407 +
106408 +struct gimple_opt_pass pass_local_variable = {
106409 + {
106410 + .type = GIMPLE_PASS,
106411 + .name = "check_local_variables",
106412 + .gate = NULL,
106413 + .execute = check_local_variables,
106414 + .sub = NULL,
106415 + .next = NULL,
106416 + .static_pass_number = 0,
106417 + .tv_id = TV_NONE,
106418 + .properties_required = 0,
106419 + .properties_provided = 0,
106420 + .properties_destroyed = 0,
106421 + .todo_flags_start = 0,
106422 + .todo_flags_finish = 0
106423 + }
106424 +};
106425 +
106426 +static unsigned int check_local_variables(void)
106427 +{
106428 + tree var;
106429 + referenced_var_iterator rvi;
106430 +
106431 +#if BUILDING_GCC_VERSION == 4005
106432 + FOR_EACH_REFERENCED_VAR(var, rvi) {
106433 +#else
106434 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
106435 +#endif
106436 + tree type = TREE_TYPE(var);
106437 +
106438 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
106439 + continue;
106440 +
106441 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
106442 + continue;
106443 +
106444 + if (!TYPE_READONLY(type))
106445 + continue;
106446 +
106447 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
106448 +// continue;
106449 +
106450 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
106451 +// continue;
106452 +
106453 + if (walk_struct(type)) {
106454 + error("constified variable %qE cannot be local", var);
106455 + return 1;
106456 + }
106457 + }
106458 + return 0;
106459 +}
106460 +
106461 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
106462 +{
106463 + const char * const plugin_name = plugin_info->base_name;
106464 + const int argc = plugin_info->argc;
106465 + const struct plugin_argument * const argv = plugin_info->argv;
106466 + int i;
106467 + bool constify = true;
106468 +
106469 + struct register_pass_info local_variable_pass_info = {
106470 + .pass = &pass_local_variable.pass,
106471 + .reference_pass_name = "*referenced_vars",
106472 + .ref_pass_instance_number = 0,
106473 + .pos_op = PASS_POS_INSERT_AFTER
106474 + };
106475 +
106476 + if (!plugin_default_version_check(version, &gcc_version)) {
106477 + error(G_("incompatible gcc/plugin versions"));
106478 + return 1;
106479 + }
106480 +
106481 + for (i = 0; i < argc; ++i) {
106482 + if (!(strcmp(argv[i].key, "no-constify"))) {
106483 + constify = false;
106484 + continue;
106485 + }
106486 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
106487 + }
106488 +
106489 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
106490 + if (constify) {
106491 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
106492 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
106493 + }
106494 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
106495 +
106496 + return 0;
106497 +}
106498 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
106499 new file mode 100644
106500 index 0000000..a5eabce
106501 --- /dev/null
106502 +++ b/tools/gcc/kallocstat_plugin.c
106503 @@ -0,0 +1,167 @@
106504 +/*
106505 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
106506 + * Licensed under the GPL v2
106507 + *
106508 + * Note: the choice of the license means that the compilation process is
106509 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
106510 + * but for the kernel it doesn't matter since it doesn't link against
106511 + * any of the gcc libraries
106512 + *
106513 + * gcc plugin to find the distribution of k*alloc sizes
106514 + *
106515 + * TODO:
106516 + *
106517 + * BUGS:
106518 + * - none known
106519 + */
106520 +#include "gcc-plugin.h"
106521 +#include "config.h"
106522 +#include "system.h"
106523 +#include "coretypes.h"
106524 +#include "tree.h"
106525 +#include "tree-pass.h"
106526 +#include "flags.h"
106527 +#include "intl.h"
106528 +#include "toplev.h"
106529 +#include "plugin.h"
106530 +//#include "expr.h" where are you...
106531 +#include "diagnostic.h"
106532 +#include "plugin-version.h"
106533 +#include "tm.h"
106534 +#include "function.h"
106535 +#include "basic-block.h"
106536 +#include "gimple.h"
106537 +#include "rtl.h"
106538 +#include "emit-rtl.h"
106539 +
106540 +extern void print_gimple_stmt(FILE *, gimple, int, int);
106541 +
106542 +int plugin_is_GPL_compatible;
106543 +
106544 +static const char * const kalloc_functions[] = {
106545 + "__kmalloc",
106546 + "kmalloc",
106547 + "kmalloc_large",
106548 + "kmalloc_node",
106549 + "kmalloc_order",
106550 + "kmalloc_order_trace",
106551 + "kmalloc_slab",
106552 + "kzalloc",
106553 + "kzalloc_node",
106554 +};
106555 +
106556 +static struct plugin_info kallocstat_plugin_info = {
106557 + .version = "201111150100",
106558 +};
106559 +
106560 +static unsigned int execute_kallocstat(void);
106561 +
106562 +static struct gimple_opt_pass kallocstat_pass = {
106563 + .pass = {
106564 + .type = GIMPLE_PASS,
106565 + .name = "kallocstat",
106566 + .gate = NULL,
106567 + .execute = execute_kallocstat,
106568 + .sub = NULL,
106569 + .next = NULL,
106570 + .static_pass_number = 0,
106571 + .tv_id = TV_NONE,
106572 + .properties_required = 0,
106573 + .properties_provided = 0,
106574 + .properties_destroyed = 0,
106575 + .todo_flags_start = 0,
106576 + .todo_flags_finish = 0
106577 + }
106578 +};
106579 +
106580 +static bool is_kalloc(const char *fnname)
106581 +{
106582 + size_t i;
106583 +
106584 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
106585 + if (!strcmp(fnname, kalloc_functions[i]))
106586 + return true;
106587 + return false;
106588 +}
106589 +
106590 +static unsigned int execute_kallocstat(void)
106591 +{
106592 + basic_block bb;
106593 +
106594 + // 1. loop through BBs and GIMPLE statements
106595 + FOR_EACH_BB(bb) {
106596 + gimple_stmt_iterator gsi;
106597 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
106598 + // gimple match:
106599 + tree fndecl, size;
106600 + gimple call_stmt;
106601 + const char *fnname;
106602 +
106603 + // is it a call
106604 + call_stmt = gsi_stmt(gsi);
106605 + if (!is_gimple_call(call_stmt))
106606 + continue;
106607 + fndecl = gimple_call_fndecl(call_stmt);
106608 + if (fndecl == NULL_TREE)
106609 + continue;
106610 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
106611 + continue;
106612 +
106613 + // is it a call to k*alloc
106614 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
106615 + if (!is_kalloc(fnname))
106616 + continue;
106617 +
106618 + // is the size arg the result of a simple const assignment
106619 + size = gimple_call_arg(call_stmt, 0);
106620 + while (true) {
106621 + gimple def_stmt;
106622 + expanded_location xloc;
106623 + size_t size_val;
106624 +
106625 + if (TREE_CODE(size) != SSA_NAME)
106626 + break;
106627 + def_stmt = SSA_NAME_DEF_STMT(size);
106628 + if (!def_stmt || !is_gimple_assign(def_stmt))
106629 + break;
106630 + if (gimple_num_ops(def_stmt) != 2)
106631 + break;
106632 + size = gimple_assign_rhs1(def_stmt);
106633 + if (!TREE_CONSTANT(size))
106634 + continue;
106635 + xloc = expand_location(gimple_location(def_stmt));
106636 + if (!xloc.file)
106637 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
106638 + size_val = TREE_INT_CST_LOW(size);
106639 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
106640 + break;
106641 + }
106642 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
106643 +//debug_tree(gimple_call_fn(call_stmt));
106644 +//print_node(stderr, "pax", fndecl, 4);
106645 + }
106646 + }
106647 +
106648 + return 0;
106649 +}
106650 +
106651 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
106652 +{
106653 + const char * const plugin_name = plugin_info->base_name;
106654 + struct register_pass_info kallocstat_pass_info = {
106655 + .pass = &kallocstat_pass.pass,
106656 + .reference_pass_name = "ssa",
106657 + .ref_pass_instance_number = 0,
106658 + .pos_op = PASS_POS_INSERT_AFTER
106659 + };
106660 +
106661 + if (!plugin_default_version_check(version, &gcc_version)) {
106662 + error(G_("incompatible gcc/plugin versions"));
106663 + return 1;
106664 + }
106665 +
106666 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
106667 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
106668 +
106669 + return 0;
106670 +}
106671 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
106672 new file mode 100644
106673 index 0000000..008f159
106674 --- /dev/null
106675 +++ b/tools/gcc/kernexec_plugin.c
106676 @@ -0,0 +1,427 @@
106677 +/*
106678 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
106679 + * Licensed under the GPL v2
106680 + *
106681 + * Note: the choice of the license means that the compilation process is
106682 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
106683 + * but for the kernel it doesn't matter since it doesn't link against
106684 + * any of the gcc libraries
106685 + *
106686 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
106687 + *
106688 + * TODO:
106689 + *
106690 + * BUGS:
106691 + * - none known
106692 + */
106693 +#include "gcc-plugin.h"
106694 +#include "config.h"
106695 +#include "system.h"
106696 +#include "coretypes.h"
106697 +#include "tree.h"
106698 +#include "tree-pass.h"
106699 +#include "flags.h"
106700 +#include "intl.h"
106701 +#include "toplev.h"
106702 +#include "plugin.h"
106703 +//#include "expr.h" where are you...
106704 +#include "diagnostic.h"
106705 +#include "plugin-version.h"
106706 +#include "tm.h"
106707 +#include "function.h"
106708 +#include "basic-block.h"
106709 +#include "gimple.h"
106710 +#include "rtl.h"
106711 +#include "emit-rtl.h"
106712 +#include "tree-flow.h"
106713 +
106714 +extern void print_gimple_stmt(FILE *, gimple, int, int);
106715 +extern rtx emit_move_insn(rtx x, rtx y);
106716 +
106717 +int plugin_is_GPL_compatible;
106718 +
106719 +static struct plugin_info kernexec_plugin_info = {
106720 + .version = "201111291120",
106721 + .help = "method=[bts|or]\tinstrumentation method\n"
106722 +};
106723 +
106724 +static unsigned int execute_kernexec_reload(void);
106725 +static unsigned int execute_kernexec_fptr(void);
106726 +static unsigned int execute_kernexec_retaddr(void);
106727 +static bool kernexec_cmodel_check(void);
106728 +
106729 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
106730 +static void (*kernexec_instrument_retaddr)(rtx);
106731 +
106732 +static struct gimple_opt_pass kernexec_reload_pass = {
106733 + .pass = {
106734 + .type = GIMPLE_PASS,
106735 + .name = "kernexec_reload",
106736 + .gate = kernexec_cmodel_check,
106737 + .execute = execute_kernexec_reload,
106738 + .sub = NULL,
106739 + .next = NULL,
106740 + .static_pass_number = 0,
106741 + .tv_id = TV_NONE,
106742 + .properties_required = 0,
106743 + .properties_provided = 0,
106744 + .properties_destroyed = 0,
106745 + .todo_flags_start = 0,
106746 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
106747 + }
106748 +};
106749 +
106750 +static struct gimple_opt_pass kernexec_fptr_pass = {
106751 + .pass = {
106752 + .type = GIMPLE_PASS,
106753 + .name = "kernexec_fptr",
106754 + .gate = kernexec_cmodel_check,
106755 + .execute = execute_kernexec_fptr,
106756 + .sub = NULL,
106757 + .next = NULL,
106758 + .static_pass_number = 0,
106759 + .tv_id = TV_NONE,
106760 + .properties_required = 0,
106761 + .properties_provided = 0,
106762 + .properties_destroyed = 0,
106763 + .todo_flags_start = 0,
106764 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
106765 + }
106766 +};
106767 +
106768 +static struct rtl_opt_pass kernexec_retaddr_pass = {
106769 + .pass = {
106770 + .type = RTL_PASS,
106771 + .name = "kernexec_retaddr",
106772 + .gate = kernexec_cmodel_check,
106773 + .execute = execute_kernexec_retaddr,
106774 + .sub = NULL,
106775 + .next = NULL,
106776 + .static_pass_number = 0,
106777 + .tv_id = TV_NONE,
106778 + .properties_required = 0,
106779 + .properties_provided = 0,
106780 + .properties_destroyed = 0,
106781 + .todo_flags_start = 0,
106782 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
106783 + }
106784 +};
106785 +
106786 +static bool kernexec_cmodel_check(void)
106787 +{
106788 + tree section;
106789 +
106790 + if (ix86_cmodel != CM_KERNEL)
106791 + return false;
106792 +
106793 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
106794 + if (!section || !TREE_VALUE(section))
106795 + return true;
106796 +
106797 + section = TREE_VALUE(TREE_VALUE(section));
106798 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
106799 + return true;
106800 +
106801 + return false;
106802 +}
106803 +
106804 +/*
106805 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
106806 + */
106807 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
106808 +{
106809 + gimple asm_movabs_stmt;
106810 +
106811 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
106812 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
106813 + gimple_asm_set_volatile(asm_movabs_stmt, true);
106814 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
106815 + update_stmt(asm_movabs_stmt);
106816 +}
106817 +
106818 +/*
106819 + * find all asm() stmts that clobber r10 and add a reload of r10
106820 + */
106821 +static unsigned int execute_kernexec_reload(void)
106822 +{
106823 + basic_block bb;
106824 +
106825 + // 1. loop through BBs and GIMPLE statements
106826 + FOR_EACH_BB(bb) {
106827 + gimple_stmt_iterator gsi;
106828 +
106829 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
106830 + // gimple match: __asm__ ("" : : : "r10");
106831 + gimple asm_stmt;
106832 + size_t nclobbers;
106833 +
106834 + // is it an asm ...
106835 + asm_stmt = gsi_stmt(gsi);
106836 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
106837 + continue;
106838 +
106839 + // ... clobbering r10
106840 + nclobbers = gimple_asm_nclobbers(asm_stmt);
106841 + while (nclobbers--) {
106842 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
106843 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
106844 + continue;
106845 + kernexec_reload_fptr_mask(&gsi);
106846 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
106847 + break;
106848 + }
106849 + }
106850 + }
106851 +
106852 + return 0;
106853 +}
106854 +
106855 +/*
106856 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
106857 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
106858 + */
106859 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
106860 +{
106861 + gimple assign_intptr, assign_new_fptr, call_stmt;
106862 + tree intptr, old_fptr, new_fptr, kernexec_mask;
106863 +
106864 + call_stmt = gsi_stmt(*gsi);
106865 + old_fptr = gimple_call_fn(call_stmt);
106866 +
106867 + // create temporary unsigned long variable used for bitops and cast fptr to it
106868 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
106869 + add_referenced_var(intptr);
106870 + mark_sym_for_renaming(intptr);
106871 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
106872 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
106873 + update_stmt(assign_intptr);
106874 +
106875 + // apply logical or to temporary unsigned long and bitmask
106876 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
106877 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
106878 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
106879 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
106880 + update_stmt(assign_intptr);
106881 +
106882 + // cast temporary unsigned long back to a temporary fptr variable
106883 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
106884 + add_referenced_var(new_fptr);
106885 + mark_sym_for_renaming(new_fptr);
106886 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
106887 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
106888 + update_stmt(assign_new_fptr);
106889 +
106890 + // replace call stmt fn with the new fptr
106891 + gimple_call_set_fn(call_stmt, new_fptr);
106892 + update_stmt(call_stmt);
106893 +}
106894 +
106895 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
106896 +{
106897 + gimple asm_or_stmt, call_stmt;
106898 + tree old_fptr, new_fptr, input, output;
106899 + VEC(tree, gc) *inputs = NULL;
106900 + VEC(tree, gc) *outputs = NULL;
106901 +
106902 + call_stmt = gsi_stmt(*gsi);
106903 + old_fptr = gimple_call_fn(call_stmt);
106904 +
106905 + // create temporary fptr variable
106906 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
106907 + add_referenced_var(new_fptr);
106908 + mark_sym_for_renaming(new_fptr);
106909 +
106910 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
106911 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
106912 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
106913 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
106914 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
106915 + VEC_safe_push(tree, gc, inputs, input);
106916 + VEC_safe_push(tree, gc, outputs, output);
106917 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
106918 + gimple_asm_set_volatile(asm_or_stmt, true);
106919 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
106920 + update_stmt(asm_or_stmt);
106921 +
106922 + // replace call stmt fn with the new fptr
106923 + gimple_call_set_fn(call_stmt, new_fptr);
106924 + update_stmt(call_stmt);
106925 +}
106926 +
106927 +/*
106928 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
106929 + */
106930 +static unsigned int execute_kernexec_fptr(void)
106931 +{
106932 + basic_block bb;
106933 +
106934 + // 1. loop through BBs and GIMPLE statements
106935 + FOR_EACH_BB(bb) {
106936 + gimple_stmt_iterator gsi;
106937 +
106938 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
106939 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
106940 + tree fn;
106941 + gimple call_stmt;
106942 +
106943 + // is it a call ...
106944 + call_stmt = gsi_stmt(gsi);
106945 + if (!is_gimple_call(call_stmt))
106946 + continue;
106947 + fn = gimple_call_fn(call_stmt);
106948 + if (TREE_CODE(fn) == ADDR_EXPR)
106949 + continue;
106950 + if (TREE_CODE(fn) != SSA_NAME)
106951 + gcc_unreachable();
106952 +
106953 + // ... through a function pointer
106954 + fn = SSA_NAME_VAR(fn);
106955 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
106956 + continue;
106957 + fn = TREE_TYPE(fn);
106958 + if (TREE_CODE(fn) != POINTER_TYPE)
106959 + continue;
106960 + fn = TREE_TYPE(fn);
106961 + if (TREE_CODE(fn) != FUNCTION_TYPE)
106962 + continue;
106963 +
106964 + kernexec_instrument_fptr(&gsi);
106965 +
106966 +//debug_tree(gimple_call_fn(call_stmt));
106967 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
106968 + }
106969 + }
106970 +
106971 + return 0;
106972 +}
106973 +
106974 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
106975 +static void kernexec_instrument_retaddr_bts(rtx insn)
106976 +{
106977 + rtx btsq;
106978 + rtvec argvec, constraintvec, labelvec;
106979 + int line;
106980 +
106981 + // create asm volatile("btsq $63,(%%rsp)":::)
106982 + argvec = rtvec_alloc(0);
106983 + constraintvec = rtvec_alloc(0);
106984 + labelvec = rtvec_alloc(0);
106985 + line = expand_location(RTL_LOCATION(insn)).line;
106986 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
106987 + MEM_VOLATILE_P(btsq) = 1;
106988 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
106989 + emit_insn_before(btsq, insn);
106990 +}
106991 +
106992 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
106993 +static void kernexec_instrument_retaddr_or(rtx insn)
106994 +{
106995 + rtx orq;
106996 + rtvec argvec, constraintvec, labelvec;
106997 + int line;
106998 +
106999 + // create asm volatile("orq %%r10,(%%rsp)":::)
107000 + argvec = rtvec_alloc(0);
107001 + constraintvec = rtvec_alloc(0);
107002 + labelvec = rtvec_alloc(0);
107003 + line = expand_location(RTL_LOCATION(insn)).line;
107004 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
107005 + MEM_VOLATILE_P(orq) = 1;
107006 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
107007 + emit_insn_before(orq, insn);
107008 +}
107009 +
107010 +/*
107011 + * find all asm level function returns and forcibly set the highest bit of the return address
107012 + */
107013 +static unsigned int execute_kernexec_retaddr(void)
107014 +{
107015 + rtx insn;
107016 +
107017 + // 1. find function returns
107018 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
107019 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
107020 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
107021 + rtx body;
107022 +
107023 + // is it a retn
107024 + if (!JUMP_P(insn))
107025 + continue;
107026 + body = PATTERN(insn);
107027 + if (GET_CODE(body) == PARALLEL)
107028 + body = XVECEXP(body, 0, 0);
107029 + if (GET_CODE(body) != RETURN)
107030 + continue;
107031 + kernexec_instrument_retaddr(insn);
107032 + }
107033 +
107034 +// print_simple_rtl(stderr, get_insns());
107035 +// print_rtl(stderr, get_insns());
107036 +
107037 + return 0;
107038 +}
107039 +
107040 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
107041 +{
107042 + const char * const plugin_name = plugin_info->base_name;
107043 + const int argc = plugin_info->argc;
107044 + const struct plugin_argument * const argv = plugin_info->argv;
107045 + int i;
107046 + struct register_pass_info kernexec_reload_pass_info = {
107047 + .pass = &kernexec_reload_pass.pass,
107048 + .reference_pass_name = "ssa",
107049 + .ref_pass_instance_number = 0,
107050 + .pos_op = PASS_POS_INSERT_AFTER
107051 + };
107052 + struct register_pass_info kernexec_fptr_pass_info = {
107053 + .pass = &kernexec_fptr_pass.pass,
107054 + .reference_pass_name = "ssa",
107055 + .ref_pass_instance_number = 0,
107056 + .pos_op = PASS_POS_INSERT_AFTER
107057 + };
107058 + struct register_pass_info kernexec_retaddr_pass_info = {
107059 + .pass = &kernexec_retaddr_pass.pass,
107060 + .reference_pass_name = "pro_and_epilogue",
107061 + .ref_pass_instance_number = 0,
107062 + .pos_op = PASS_POS_INSERT_AFTER
107063 + };
107064 +
107065 + if (!plugin_default_version_check(version, &gcc_version)) {
107066 + error(G_("incompatible gcc/plugin versions"));
107067 + return 1;
107068 + }
107069 +
107070 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
107071 +
107072 + if (TARGET_64BIT == 0)
107073 + return 0;
107074 +
107075 + for (i = 0; i < argc; ++i) {
107076 + if (!strcmp(argv[i].key, "method")) {
107077 + if (!argv[i].value) {
107078 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
107079 + continue;
107080 + }
107081 + if (!strcmp(argv[i].value, "bts")) {
107082 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
107083 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
107084 + } else if (!strcmp(argv[i].value, "or")) {
107085 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
107086 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
107087 + fix_register("r10", 1, 1);
107088 + } else
107089 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
107090 + continue;
107091 + }
107092 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
107093 + }
107094 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
107095 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
107096 +
107097 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
107098 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
107099 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
107100 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
107101 +
107102 + return 0;
107103 +}
107104 diff --git a/tools/gcc/size_overflow_hash1.h b/tools/gcc/size_overflow_hash1.h
107105 new file mode 100644
107106 index 0000000..5b08f5c
107107 --- /dev/null
107108 +++ b/tools/gcc/size_overflow_hash1.h
107109 @@ -0,0 +1,1055 @@
107110 +struct size_overflow_hash size_overflow_hash1[65536] = {
107111 + [10167].file = "sound/core/oss/pcm_plugin.c",
107112 + [10167].name = "snd_pcm_plugin_build",
107113 + [10167].param5 = 1,
107114 + [1022].file = "sound/pci/rme9652/rme9652.c",
107115 + [1022].name = "snd_rme9652_playback_copy",
107116 + [1022].param5 = 1,
107117 + [10341].file = "fs/nfsd/nfs4xdr.c",
107118 + [10341].name = "read_buf",
107119 + [10341].param2 = 1,
107120 + [10496].file = "drivers/bluetooth/hci_vhci.c",
107121 + [10496].name = "vhci_read",
107122 + [10496].param3 = 1,
107123 + [10623].file = "drivers/infiniband/core/user_mad.c",
107124 + [10623].name = "ib_umad_write",
107125 + [10623].param3 = 1,
107126 + [10674].file = "drivers/mtd/mtdchar.c",
107127 + [10674].name = "mtd_do_writeoob",
107128 + [10674].param4 = 1,
107129 + [10773].file = "drivers/input/mousedev.c",
107130 + [10773].name = "mousedev_read",
107131 + [10773].param3 = 1,
107132 + [10776].file = "drivers/media/video/gspca/t613.c",
107133 + [10776].name = "reg_w_buf",
107134 + [10776].param3 = 1,
107135 + [10919].file = "net/ipv4/netfilter/arp_tables.c",
107136 + [10919].name = "do_arpt_set_ctl",
107137 + [10919].param4 = 1,
107138 + [11054].file = "drivers/net/wireless/libertas/debugfs.c",
107139 + [11054].name = "lbs_wrmac_write",
107140 + [11054].param3 = 1,
107141 + [11068].file = "drivers/net/wireless/libertas/debugfs.c",
107142 + [11068].name = "lbs_wrrf_write",
107143 + [11068].param3 = 1,
107144 + [11385].file = "net/tipc/socket.c",
107145 + [11385].name = "recv_msg",
107146 + [11385].param4 = 1,
107147 + [11402].file = "drivers/net/wireless/libertas/debugfs.c",
107148 + [11402].name = "lbs_threshold_write",
107149 + [11402].param5 = 1,
107150 + [11494].file = "drivers/video/via/viafbdev.c",
107151 + [11494].name = "viafb_dvp1_proc_write",
107152 + [11494].param3 = 1,
107153 + [11699].file = "drivers/net/vxge/vxge-config.h",
107154 + [11699].name = "vxge_os_dma_malloc",
107155 + [11699].param2 = 1,
107156 + [11986].file = "drivers/net/usb/asix.c",
107157 + [11986].name = "asix_read_cmd",
107158 + [11986].param5 = 1,
107159 + [12205].file = "fs/reiserfs/journal.c",
107160 + [12205].name = "reiserfs_allocate_list_bitmaps",
107161 + [12205].param3 = 1,
107162 + [1248].file = "kernel/kprobes.c",
107163 + [1248].name = "write_enabled_file_bool",
107164 + [1248].param3 = 1,
107165 + [12591].file = "sound/core/pcm_lib.c",
107166 + [12591].name = "snd_pcm_lib_writev_transfer",
107167 + [12591].param5 = 1,
107168 + [12755].file = "sound/drivers/opl4/opl4_proc.c",
107169 + [12755].name = "snd_opl4_mem_proc_read",
107170 + [12755].param5 = 1,
107171 + [12833].file = "net/sctp/auth.c",
107172 + [12833].name = "sctp_auth_create_key",
107173 + [12833].param1 = 1,
107174 + [12954].file = "fs/proc/base.c",
107175 + [12954].name = "oom_adjust_write",
107176 + [12954].param3 = 1,
107177 + [13121].file = "net/ipv4/ip_sockglue.c",
107178 + [13121].name = "do_ip_setsockopt",
107179 + [13121].param5 = 1,
107180 + [13863].file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c",
107181 + [13863].name = "rs_sta_dbgfs_scale_table_write",
107182 + [13863].param3 = 1,
107183 + [13924].file = "net/ipv4/netfilter/ip_tables.c",
107184 + [13924].name = "do_ipt_set_ctl",
107185 + [13924].param4 = 1,
107186 + [14019].file = "fs/cifs/dns_resolve.c",
107187 + [14019].name = "dns_resolver_instantiate",
107188 + [14019].param3 = 1,
107189 + [14025].file = "net/ax25/af_ax25.c",
107190 + [14025].name = "ax25_setsockopt",
107191 + [14025].param5 = 1,
107192 + [14031].file = "drivers/net/wireless/ath/ath5k/debug.c",
107193 + [14031].name = "write_file_beacon",
107194 + [14031].param3 = 1,
107195 + [14090].file = "drivers/bluetooth/btmrvl_debugfs.c",
107196 + [14090].name = "btmrvl_hsmode_write",
107197 + [14090].param3 = 1,
107198 + [14174].file = "sound/pci/es1938.c",
107199 + [14174].name = "snd_es1938_capture_copy",
107200 + [14174].param5 = 1,
107201 + [14299].file = "sound/core/oss/pcm_plugin.c",
107202 + [14299].name = "snd_pcm_plugin_alloc",
107203 + [14299].param2 = 1,
107204 + [14345].file = "fs/cachefiles/daemon.c",
107205 + [14345].name = "cachefiles_daemon_write",
107206 + [14345].param3 = 1,
107207 + [14347].file = "drivers/media/dvb/dvb-core/dvb_ca_en50221.c",
107208 + [14347].name = "dvb_ca_en50221_io_write",
107209 + [14347].param3 = 1,
107210 + [15071].file = "drivers/net/wireless/ipw2x00/libipw_module.c",
107211 + [15071].name = "store_debug_level",
107212 + [15071].param3 = 1,
107213 + [15112].file = "drivers/xen/evtchn.c",
107214 + [15112].name = "evtchn_write",
107215 + [15112].param3 = 1,
107216 + [15274].file = "crypto/shash.c",
107217 + [15274].name = "crypto_shash_setkey",
107218 + [15274].param3 = 1,
107219 + [15319].file = "net/netfilter/xt_recent.c",
107220 + [15319].name = "recent_old_proc_write",
107221 + [15319].param3 = 1,
107222 + [15891].file = "drivers/media/video/videobuf-dma-sg.c",
107223 + [15891].name = "__videobuf_alloc",
107224 + [15891].param1 = 1,
107225 + [1603].file = "fs/debugfs/file.c",
107226 + [1603].name = "write_file_bool",
107227 + [1603].param3 = 1,
107228 + [16073].file = "net/sctp/socket.c",
107229 + [16073].name = "sctp_setsockopt",
107230 + [16073].param5 = 1,
107231 + [16166].file = "drivers/platform/x86/thinkpad_acpi.c",
107232 + [16166].name = "dispatch_proc_write",
107233 + [16166].param3 = 1,
107234 + [16344].file = "lib/scatterlist.c",
107235 + [16344].name = "sg_kmalloc",
107236 + [16344].param1 = 1,
107237 + [16605].file = "fs/ecryptfs/miscdev.c",
107238 + [16605].name = "ecryptfs_send_miscdev",
107239 + [16605].param2 = 1,
107240 + [16606].file = "drivers/ide/ide-tape.c",
107241 + [16606].name = "idetape_chrdev_write",
107242 + [16606].param3 = 1,
107243 + [16758].file = "drivers/net/usb/pegasus.c",
107244 + [16758].name = "set_registers",
107245 + [16758].param3 = 1,
107246 + [16911].file = "drivers/media/dvb/ttpci/av7110_hw.c",
107247 + [16911].name = "LoadBitmap",
107248 + [16911].param2 = 1,
107249 + [17139].file = "fs/ubifs/xattr.c",
107250 + [17139].name = "ubifs_setxattr",
107251 + [17139].param4 = 1,
107252 + [17170].file = "drivers/media/video/zc0301/zc0301_core.c",
107253 + [17170].name = "zc0301_read",
107254 + [17170].param3 = 1,
107255 + [17224].file = "drivers/media/video/w9968cf.c",
107256 + [17224].name = "w9968cf_read",
107257 + [17224].param3 = 1,
107258 + [17377].file = "drivers/usb/class/cdc-wdm.c",
107259 + [17377].name = "wdm_write",
107260 + [17377].param3 = 1,
107261 + [17460].file = "fs/nfsd/nfscache.c",
107262 + [17460].name = "nfsd_cache_update",
107263 + [17460].param3 = 1,
107264 + [17492].file = "net/dccp/proto.c",
107265 + [17492].name = "do_dccp_setsockopt",
107266 + [17492].param5 = 1,
107267 + [17828].file = "kernel/sched.c",
107268 + [17828].name = "sched_feat_write",
107269 + [17828].param3 = 1,
107270 + [1800].file = "drivers/media/dvb/dvb-core/dmxdev.c",
107271 + [1800].name = "dvb_dvr_do_ioctl",
107272 + [1800].param4 = 1,
107273 + [18224].file = "drivers/xen/grant-table.c",
107274 + [18224].name = "gnttab_map",
107275 + [18224].param2 = 1,
107276 + [18232].file = "fs/nfs/write.c",
107277 + [18232].name = "nfs_writedata_alloc",
107278 + [18232].param1 = 1,
107279 + [18303].file = "fs/xattr.c",
107280 + [18303].name = "getxattr",
107281 + [18303].param4 = 1,
107282 + [18313].file = "drivers/platform/x86/toshiba_acpi.c",
107283 + [18313].name = "dispatch_write",
107284 + [18313].param3 = 1,
107285 + [18353].file = "net/rfkill/core.c",
107286 + [18353].name = "rfkill_fop_read",
107287 + [18353].param3 = 1,
107288 + [183].file = "crypto/ahash.c",
107289 + [183].name = "crypto_ahash_setkey",
107290 + [183].param3 = 1,
107291 + [1858].file = "net/ipv6/netfilter/ip6_tables.c",
107292 + [1858].name = "do_ip6t_set_ctl",
107293 + [1858].param4 = 1,
107294 + [18592].file = "drivers/base/platform.c",
107295 + [18592].name = "platform_device_add_resources",
107296 + [18592].param3 = 1,
107297 + [19012].file = "drivers/acpi/event.c",
107298 + [19012].name = "acpi_system_read_event",
107299 + [19012].param3 = 1,
107300 + [19261].file = "net/netlabel/netlabel_domainhash.c",
107301 + [19261].name = "netlbl_domhsh_init",
107302 + [19261].param1 = 1,
107303 + [19288].file = "net/ipv6/raw.c",
107304 + [19288].name = "rawv6_setsockopt",
107305 + [19288].param5 = 1,
107306 + [19504].file = "drivers/usb/serial/garmin_gps.c",
107307 + [19504].name = "pkt_add",
107308 + [19504].param3 = 1,
107309 + [19511].file = "drivers/scsi/cxgb3i/cxgb3i_ddp.c",
107310 + [19511].name = "cxgb3i_ddp_make_gl",
107311 + [19511].param1 = 1,
107312 + [19738].file = "fs/sysfs/file.c",
107313 + [19738].name = "sysfs_write_file",
107314 + [19738].param3 = 1,
107315 + [19909].file = "drivers/net/wireless/libertas/debugfs.c",
107316 + [19909].name = "lbs_sleepparams_write",
107317 + [19909].param3 = 1,
107318 + [19960].file = "drivers/usb/class/usblp.c",
107319 + [19960].name = "usblp_read",
107320 + [19960].param3 = 1,
107321 + [20023].file = "drivers/media/video/gspca/gspca.c",
107322 + [20023].name = "dev_read",
107323 + [20023].param3 = 1,
107324 + [20113].file = "drivers/net/wireless/libertas/debugfs.c",
107325 + [20113].name = "lbs_rdmac_write",
107326 + [20113].param3 = 1,
107327 + [20123].file = "drivers/ieee1394/csr1212.h",
107328 + [20123].name = "csr1212_rom_cache_malloc",
107329 + [20123].param2 = 1,
107330 + [20314].file = "drivers/gpu/drm/drm_hashtab.c",
107331 + [20314].name = "drm_ht_create",
107332 + [20314].param2 = 1,
107333 + [20611].file = "net/netfilter/x_tables.c",
107334 + [20611].name = "xt_alloc_table_info",
107335 + [20611].param1 = 1,
107336 + [20951].file = "crypto/rng.c",
107337 + [20951].name = "rngapi_reset",
107338 + [20951].param3 = 1,
107339 + [21134].file = "drivers/video/via/viafbdev.c",
107340 + [21134].name = "viafb_dfph_proc_write",
107341 + [21134].param3 = 1,
107342 + [21277].file = "drivers/usb/storage/shuttle_usbat.c",
107343 + [21277].name = "usbat_flash_write_data",
107344 + [21277].param4 = 1,
107345 + [21312].file = "lib/ts_kmp.c",
107346 + [21312].name = "kmp_init",
107347 + [21312].param2 = 1,
107348 + [21397].file = "net/core/sock.c",
107349 + [21397].name = "sock_setsockopt",
107350 + [21397].param5 = 1,
107351 + [21451].file = "net/netfilter/ipvs/ip_vs_ctl.c",
107352 + [21451].name = "do_ip_vs_set_ctl",
107353 + [21451].param4 = 1,
107354 + [21538].file = "net/bluetooth/l2cap.c",
107355 + [21538].name = "l2cap_sock_setsockopt",
107356 + [21538].param5 = 1,
107357 + [21608].file = "drivers/char/tpm/tpm.c",
107358 + [21608].name = "tpm_write",
107359 + [21608].param3 = 1,
107360 + [2180].file = "drivers/char/ppdev.c",
107361 + [2180].name = "pp_write",
107362 + [2180].param3 = 1,
107363 + [22173].file = "drivers/ieee1394/highlevel.c",
107364 + [22173].name = "hpsb_create_hostinfo",
107365 + [22173].param3 = 1,
107366 + [22190].file = "drivers/char/tpm/tpm.c",
107367 + [22190].name = "tpm_read",
107368 + [22190].param3 = 1,
107369 + [22291].file = "net/core/pktgen.c",
107370 + [22291].name = "pgctrl_write",
107371 + [22291].param3 = 1,
107372 + [22428].file = "ipc/ipc_sysctl.c",
107373 + [22428].name = "sysctl_ipc_data",
107374 + [22428].param5 = 1,
107375 + [2243].file = "drivers/scsi/scsi_tgt_lib.c",
107376 + [2243].name = "scsi_tgt_kspace_exec",
107377 + [2243].param8 = 1,
107378 + [22546].file = "drivers/char/pcmcia/cm4040_cs.c",
107379 + [22546].name = "cm4040_read",
107380 + [22546].param3 = 1,
107381 + [23093].file = "drivers/scsi/st.c",
107382 + [23093].name = "st_read",
107383 + [23093].param3 = 1,
107384 + [2324].file = "net/ieee802154/wpan-class.c",
107385 + [2324].name = "wpan_phy_alloc",
107386 + [2324].param1 = 1,
107387 + [23535].file = "ipc/sem.c",
107388 + [23535].name = "sys_semtimedop",
107389 + [23535].param3 = 1,
107390 + [2386].file = "drivers/acpi/acpica/exnames.c",
107391 + [2386].name = "acpi_ex_allocate_name_string",
107392 + [2386].param2 = 1,
107393 + [23883].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
107394 + [23883].name = "iwl_dbgfs_interrupt_write",
107395 + [23883].param3 = 1,
107396 + [23999].file = "sound/pci/rme9652/hdsp.c",
107397 + [23999].name = "snd_hdsp_capture_copy",
107398 + [23999].param5 = 1,
107399 + [24263].file = "kernel/cgroup.c",
107400 + [24263].name = "cgroup_file_write",
107401 + [24263].param3 = 1,
107402 + [24549].file = "drivers/infiniband/core/ucm.c",
107403 + [24549].name = "ib_ucm_alloc_data",
107404 + [24549].param3 = 1,
107405 + [24719].file = "drivers/input/evdev.c",
107406 + [24719].name = "bits_to_user",
107407 + [24719].param2 = 1,
107408 + [24719].param3 = 1,
107409 + [24805].file = "security/keys/user_defined.c",
107410 + [24805].name = "user_update",
107411 + [24805].param3 = 1,
107412 + [25127].file = "drivers/scsi/device_handler/scsi_dh_alua.c",
107413 + [25127].name = "realloc_buffer",
107414 + [25127].param2 = 1,
107415 + [25158].file = "drivers/net/mlx4/en_rx.c",
107416 + [25158].name = "mlx4_en_create_rx_ring",
107417 + [25158].param3 = 1,
107418 + [25267].file = "fs/configfs/file.c",
107419 + [25267].name = "configfs_write_file",
107420 + [25267].param3 = 1,
107421 + [25558].file = "fs/proc/task_mmu.c",
107422 + [25558].name = "clear_refs_write",
107423 + [25558].param3 = 1,
107424 + [25884].file = "drivers/net/wireless/zd1211rw/zd_usb.c",
107425 + [25884].name = "zd_usb_ioread16v",
107426 + [25884].param4 = 1,
107427 + [26256].file = "fs/hpfs/name.c",
107428 + [26256].name = "hpfs_translate_name",
107429 + [26256].param3 = 1,
107430 + [26560].file = "crypto/algapi.c",
107431 + [26560].name = "crypto_alloc_instance2",
107432 + [26560].param3 = 1,
107433 + [26701].file = "drivers/mtd/chips/cfi_util.c",
107434 + [26701].name = "cfi_read_pri",
107435 + [26701].param3 = 1,
107436 + [26912].file = "drivers/ieee1394/raw1394.c",
107437 + [26912].name = "arm_write",
107438 + [26912].param6 = 1,
107439 + [26962].file = "drivers/usb/class/usbtmc.c",
107440 + [26962].name = "usbtmc_write",
107441 + [26962].param3 = 1,
107442 + [27004].file = "drivers/misc/hpilo.c",
107443 + [27004].name = "ilo_write",
107444 + [27004].param3 = 1,
107445 + [2711].file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c",
107446 + [2711].name = "dvb_ringbuffer_read_user",
107447 + [2711].param3 = 1,
107448 + [27129].file = "fs/lockd/mon.c",
107449 + [27129].name = "nsm_get_handle",
107450 + [27129].param4 = 1,
107451 + [27280].file = "drivers/net/mlx4/en_tx.c",
107452 + [27280].name = "mlx4_en_create_tx_ring",
107453 + [27280].param3 = 1,
107454 + [27290].file = "security/selinux/ss/services.c",
107455 + [27290].name = "security_context_to_sid_core",
107456 + [27290].param2 = 1,
107457 + [27302].file = "fs/proc/base.c",
107458 + [27302].name = "proc_loginuid_write",
107459 + [27302].param3 = 1,
107460 + [27347].file = "drivers/net/wireless/zd1211rw/zd_usb.c",
107461 + [27347].name = "zd_usb_rfwrite",
107462 + [27347].param3 = 1,
107463 + [27491].file = "fs/proc/base.c",
107464 + [27491].name = "proc_pid_attr_write",
107465 + [27491].param3 = 1,
107466 + [28092].file = "fs/select.c",
107467 + [28092].name = "do_sys_poll",
107468 + [28092].param2 = 1,
107469 + [28126].file = "drivers/net/wireless/zd1211rw/zd_chip.c",
107470 + [28126].name = "zd_ioread32v_locked",
107471 + [28126].param4 = 1,
107472 + [28370].file = "kernel/sysctl.c",
107473 + [28370].name = "sysctl_string",
107474 + [28370].param5 = 1,
107475 + [28462].file = "net/rfkill/core.c",
107476 + [28462].name = "rfkill_fop_write",
107477 + [28462].param3 = 1,
107478 + [28635].file = "drivers/gpu/drm/drm_sman.c",
107479 + [28635].name = "drm_sman_init",
107480 + [28635].param2 = 1,
107481 + [28655].file = "drivers/infiniband/hw/mthca/mthca_allocator.c",
107482 + [28655].name = "mthca_alloc_init",
107483 + [28655].param2 = 1,
107484 + [28879].file = "drivers/base/map.c",
107485 + [28879].name = "kobj_map",
107486 + [28879].param2 = 1,
107487 + [28889].file = "drivers/char/pcmcia/cm4040_cs.c",
107488 + [28889].name = "cm4040_write",
107489 + [28889].param3 = 1,
107490 + [28892].file = "drivers/media/video/se401.c",
107491 + [28892].name = "se401_read",
107492 + [28892].param3 = 1,
107493 + [29366].file = "drivers/char/pcmcia/cm4000_cs.c",
107494 + [29366].name = "cmm_read",
107495 + [29366].param3 = 1,
107496 + [29875].file = "sound/isa/gus/gus_pcm.c",
107497 + [29875].name = "snd_gf1_pcm_playback_copy",
107498 + [29875].param5 = 1,
107499 + [2995].file = "mm/page_alloc.c",
107500 + [2995].name = "alloc_large_system_hash",
107501 + [2995].param2 = 1,
107502 + [30438].file = "mm/filemap_xip.c",
107503 + [30438].name = "xip_file_read",
107504 + [30438].param3 = 1,
107505 + [30449].file = "drivers/telephony/ixj.c",
107506 + [30449].name = "ixj_read",
107507 + [30449].param3 = 1,
107508 + [30494].file = "fs/nilfs2/ioctl.c",
107509 + [30494].name = "nilfs_ioctl_wrap_copy",
107510 + [30494].param4 = 1,
107511 + [31348].file = "kernel/sched.c",
107512 + [31348].name = "sys_sched_getaffinity",
107513 + [31348].param2 = 1,
107514 + [31465].file = "net/rds/message.c",
107515 + [31465].name = "rds_message_map_pages",
107516 + [31465].param2 = 1,
107517 + [31492].file = "drivers/hid/hidraw.c",
107518 + [31492].name = "hidraw_read",
107519 + [31492].param3 = 1,
107520 + [3170].file = "security/integrity/ima/ima_fs.c",
107521 + [3170].name = "ima_write_policy",
107522 + [3170].param3 = 1,
107523 + [31730].file = "net/dccp/proto.c",
107524 + [31730].name = "dccp_setsockopt",
107525 + [31730].param5 = 1,
107526 + [31789].file = "fs/file.c",
107527 + [31789].name = "alloc_fdmem",
107528 + [31789].param1 = 1,
107529 + [31957].file = "fs/afs/proc.c",
107530 + [31957].name = "afs_proc_cells_write",
107531 + [31957].param3 = 1,
107532 + [32326].file = "drivers/char/n_r3964.c",
107533 + [32326].name = "r3964_write",
107534 + [32326].param4 = 1,
107535 + [32950].file = "fs/reiserfs/resize.c",
107536 + [32950].name = "reiserfs_resize",
107537 + [32950].param2 = 1,
107538 + [33256].file = "drivers/ieee1394/raw1394.c",
107539 + [33256].name = "arm_read",
107540 + [33256].param5 = 1,
107541 + [33637].file = "net/9p/client.c",
107542 + [33637].name = "p9_client_read",
107543 + [33637].param5 = 1,
107544 + [33669].file = "fs/gfs2/glock.c",
107545 + [33669].name = "gfs2_glock_nq_m",
107546 + [33669].param1 = 1,
107547 + [3384].file = "drivers/block/paride/pg.c",
107548 + [3384].name = "pg_write",
107549 + [3384].param3 = 1,
107550 + [34105].file = "fs/libfs.c",
107551 + [34105].name = "simple_read_from_buffer",
107552 + [34105].param5 = 1,
107553 + [34120].file = "drivers/media/video/pvrusb2/pvrusb2-io.c",
107554 + [34120].name = "pvr2_stream_buffer_count",
107555 + [34120].param2 = 1,
107556 + [34672].file = "drivers/char/tty_io.c",
107557 + [34672].name = "tty_write",
107558 + [34672].param3 = 1,
107559 + [34863].file = "drivers/video/fbsysfs.c",
107560 + [34863].name = "framebuffer_alloc",
107561 + [34863].param1 = 1,
107562 + [34988].file = "drivers/net/wireless/libertas/debugfs.c",
107563 + [34988].name = "lbs_rdrf_write",
107564 + [34988].param3 = 1,
107565 + [35007].file = "drivers/usb/mon/mon_bin.c",
107566 + [35007].name = "mon_bin_read",
107567 + [35007].param3 = 1,
107568 + [35050].file = "fs/ocfs2/dlm/dlmfs.c",
107569 + [35050].name = "dlmfs_file_write",
107570 + [35050].param3 = 1,
107571 + [35176].file = "drivers/usb/misc/ldusb.c",
107572 + [35176].name = "ld_usb_write",
107573 + [35176].param3 = 1,
107574 + [35268].file = "security/keys/request_key_auth.c",
107575 + [35268].name = "request_key_auth_read",
107576 + [35268].param3 = 1,
107577 + [35731].file = "drivers/usb/class/cdc-wdm.c",
107578 + [35731].name = "wdm_read",
107579 + [35731].param3 = 1,
107580 + [36284].file = "drivers/spi/spi.c",
107581 + [36284].name = "spi_register_board_info",
107582 + [36284].param2 = 1,
107583 + [3632].file = "drivers/firewire/core-cdev.c",
107584 + [3632].name = "fw_device_op_read",
107585 + [3632].param3 = 1,
107586 + [36807].file = "drivers/usb/mon/mon_bin.c",
107587 + [36807].name = "mon_bin_get_event",
107588 + [36807].param4 = 1,
107589 + [36822].file = "kernel/sysctl.c",
107590 + [36822].name = "sysctl_data",
107591 + [36822].param5 = 1,
107592 + [36981].file = "drivers/video/via/viafbdev.c",
107593 + [36981].name = "viafb_dfpl_proc_write",
107594 + [36981].param3 = 1,
107595 + [37204].file = "drivers/isdn/hardware/eicon/divasi.c",
107596 + [37204].name = "um_idi_read",
107597 + [37204].param3 = 1,
107598 + [37233].file = "fs/ocfs2/cluster/tcp.c",
107599 + [37233].name = "o2net_send_message_vec",
107600 + [37233].param4 = 1,
107601 + [37309].file = "drivers/mtd/mtdchar.c",
107602 + [37309].name = "mtd_do_readoob",
107603 + [37309].param3 = 1,
107604 + [37594].file = "include/linux/poll.h",
107605 + [37594].name = "get_fd_set",
107606 + [37594].param1 = 1,
107607 + [37611].file = "drivers/xen/xenbus/xenbus_xs.c",
107608 + [37611].name = "split",
107609 + [37611].param2 = 1,
107610 + [37661].file = "mm/filemap.c",
107611 + [37661].name = "file_read_actor",
107612 + [37661].param4 = 1,
107613 + [38109].file = "drivers/media/video/cafe_ccic.c",
107614 + [38109].name = "cafe_deliver_buffer",
107615 + [38109].param3 = 1,
107616 + [38401].file = "drivers/xen/xenfs/xenbus.c",
107617 + [38401].name = "queue_reply",
107618 + [38401].param3 = 1,
107619 + [38576].file = "drivers/i2c/i2c-dev.c",
107620 + [38576].name = "i2cdev_read",
107621 + [38576].param3 = 1,
107622 + [39001].file = "net/xfrm/xfrm_hash.c",
107623 + [39001].name = "xfrm_hash_alloc",
107624 + [39001].param1 = 1,
107625 + [39147].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
107626 + [39147].name = "iwl_dbgfs_rx_statistics_write",
107627 + [39147].param3 = 1,
107628 + [39231].file = "drivers/mtd/mtdconcat.c",
107629 + [39231].name = "concat_writev",
107630 + [39231].param3 = 1,
107631 + [39254].file = "drivers/char/pcmcia/cm4000_cs.c",
107632 + [39254].name = "cmm_write",
107633 + [39254].param3 = 1,
107634 + [39479].file = "drivers/ide/ide-tape.c",
107635 + [39479].name = "idetape_chrdev_read",
107636 + [39479].param3 = 1,
107637 + [40049].file = "drivers/bluetooth/btmrvl_debugfs.c",
107638 + [40049].name = "btmrvl_psmode_write",
107639 + [40049].param3 = 1,
107640 + [40075].file = "drivers/media/video/c-qcam.c",
107641 + [40075].name = "qc_capture",
107642 + [40075].param3 = 1,
107643 + [40161].file = "net/sunrpc/xprtsock.c",
107644 + [40161].name = "xs_setup_xprt",
107645 + [40161].param2 = 1,
107646 + [40578].file = "sound/soc/soc-core.c",
107647 + [40578].name = "codec_reg_write_file",
107648 + [40578].param3 = 1,
107649 + [40609].file = "sound/pci/rme9652/hdspm.c",
107650 + [40609].name = "snd_hdspm_playback_copy",
107651 + [40609].param5 = 1,
107652 + [40713].file = "net/mac80211/debugfs.c",
107653 + [40713].name = "noack_write",
107654 + [40713].param3 = 1,
107655 + [40786].file = "net/ipv4/netfilter/nf_nat_snmp_basic.c",
107656 + [40786].name = "asn1_octets_decode",
107657 + [40786].param2 = 1,
107658 + [40951].file = "drivers/xen/evtchn.c",
107659 + [40951].name = "evtchn_read",
107660 + [40951].param3 = 1,
107661 + [40952].file = "drivers/misc/sgi-xp/xpc_partition.c",
107662 + [40952].name = "xpc_kmalloc_cacheline_aligned",
107663 + [40952].param1 = 1,
107664 + [41000].file = "sound/core/pcm_native.c",
107665 + [41000].name = "snd_pcm_aio_read",
107666 + [41000].param3 = 1,
107667 + [41056].file = "net/sunrpc/auth_gss/auth_gss.c",
107668 + [41056].name = "gss_pipe_upcall",
107669 + [41056].param4 = 1,
107670 + [41230].file = "drivers/usb/storage/datafab.c",
107671 + [41230].name = "datafab_read_data",
107672 + [41230].param4 = 1,
107673 + [41249].file = "drivers/media/video/zr364xx.c",
107674 + [41249].name = "send_control_msg",
107675 + [41249].param6 = 1,
107676 + [41418].file = "fs/libfs.c",
107677 + [41418].name = "simple_attr_write",
107678 + [41418].param3 = 1,
107679 + [4155].file = "kernel/kexec.c",
107680 + [4155].name = "do_kimage_alloc",
107681 + [4155].param3 = 1,
107682 + [41592].file = "net/sctp/ssnmap.c",
107683 + [41592].name = "sctp_ssnmap_new",
107684 + [41592].param1 = 1,
107685 + [41592].param2 = 1,
107686 + [4200].file = "fs/squashfs/id.c",
107687 + [4200].name = "squashfs_read_id_index_table",
107688 + [4200].param3 = 1,
107689 + [42420].file = "drivers/net/wireless/hostap/hostap_ioctl.c",
107690 + [42420].name = "prism2_set_genericelement",
107691 + [42420].param3 = 1,
107692 + [42483].file = "drivers/media/video/videobuf-dma-sg.c",
107693 + [42483].name = "videobuf_dma_init_user_locked",
107694 + [42483].param3 = 1,
107695 + [42666].file = "drivers/pcmcia/cistpl.c",
107696 + [42666].name = "read_cis_cache",
107697 + [42666].param4 = 1,
107698 + [42808].file = "drivers/net/cxgb3/sge.c",
107699 + [42808].name = "alloc_ring",
107700 + [42808].param4 = 1,
107701 + [42882].file = "security/keys/user_defined.c",
107702 + [42882].name = "user_instantiate",
107703 + [42882].param3 = 1,
107704 + [43393].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
107705 + [43393].name = "iwl_dbgfs_sram_write",
107706 + [43393].param3 = 1,
107707 + [43515].file = "drivers/usb/storage/jumpshot.c",
107708 + [43515].name = "jumpshot_read_data",
107709 + [43515].param4 = 1,
107710 + [44180].file = "drivers/video/via/viafbdev.c",
107711 + [44180].name = "viafb_vt1636_proc_write",
107712 + [44180].param3 = 1,
107713 + [44290].file = "drivers/net/usb/dm9601.c",
107714 + [44290].name = "dm_read",
107715 + [44290].param3 = 1,
107716 + [44298].file = "drivers/scsi/pmcraid.c",
107717 + [44298].name = "pmcraid_copy_sglist",
107718 + [44298].param3 = 1,
107719 + [44649].file = "mm/page_cgroup.c",
107720 + [44649].name = "swap_cgroup_swapon",
107721 + [44649].param2 = 1,
107722 + [44825].file = "drivers/scsi/osd/osd_initiator.c",
107723 + [44825].name = "_osd_realloc_seg",
107724 + [44825].param3 = 1,
107725 + [45000].file = "fs/afs/proc.c",
107726 + [45000].name = "afs_proc_rootcell_write",
107727 + [45000].param3 = 1,
107728 + [45231].file = "fs/ecryptfs/crypto.c",
107729 + [45231].name = "ecryptfs_copy_filename",
107730 + [45231].param4 = 1,
107731 + [45244].file = "drivers/mfd/ab3100-core.c",
107732 + [45244].name = "ab3100_get_set_reg",
107733 + [45244].param3 = 1,
107734 + [45576].file = "net/netfilter/xt_recent.c",
107735 + [45576].name = "recent_mt_proc_write",
107736 + [45576].param3 = 1,
107737 + [45583].file = "fs/gfs2/dir.c",
107738 + [45583].name = "leaf_dealloc",
107739 + [45583].param3 = 1,
107740 + [45954].file = "drivers/usb/misc/legousbtower.c",
107741 + [45954].name = "tower_write",
107742 + [45954].param3 = 1,
107743 + [45976].file = "net/core/dev.c",
107744 + [45976].name = "alloc_netdev_mq",
107745 + [45976].param4 = 1,
107746 + [46138].file = "fs/btrfs/file.c",
107747 + [46138].name = "btrfs_file_write",
107748 + [46138].param3 = 1,
107749 + [4614].file = "sound/core/pcm_lib.c",
107750 + [4614].name = "snd_pcm_lib_write_transfer",
107751 + [4614].param5 = 1,
107752 + [46243].file = "fs/binfmt_misc.c",
107753 + [46243].name = "bm_register_write",
107754 + [46243].param3 = 1,
107755 + [46343].file = "fs/compat.c",
107756 + [46343].name = "compat_do_readv_writev",
107757 + [46343].param4 = 1,
107758 + [4644].file = "drivers/net/usb/mcs7830.c",
107759 + [4644].name = "mcs7830_get_reg",
107760 + [4644].param3 = 1,
107761 + [46630].file = "net/decnet/af_decnet.c",
107762 + [46630].name = "__dn_setsockopt",
107763 + [46630].param5 = 1,
107764 + [46881].file = "drivers/char/lp.c",
107765 + [46881].name = "lp_write",
107766 + [46881].param3 = 1,
107767 + [47385].file = "drivers/net/wireless/zd1211rw/zd_usb.c",
107768 + [47385].name = "zd_usb_iowrite16v",
107769 + [47385].param3 = 1,
107770 + [47499].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
107771 + [47499].name = "iwl_dbgfs_tx_statistics_write",
107772 + [47499].param3 = 1,
107773 + [47850].file = "fs/cifs/cifssmb.c",
107774 + [47850].name = "CIFSSMBWrite",
107775 + [47850].param4 = 1,
107776 + [48182].file = "crypto/cryptd.c",
107777 + [48182].name = "cryptd_alloc_instance",
107778 + [48182].param2 = 1,
107779 + [49263].file = "drivers/net/wireless/ath/ath9k/debug.c",
107780 + [49263].name = "write_file_wiphy",
107781 + [49263].param3 = 1,
107782 + [49354].file = "drivers/media/video/cx18/cx18-fileops.c",
107783 + [49354].name = "cx18_v4l2_read",
107784 + [49354].param3 = 1,
107785 + [49448].file = "drivers/isdn/gigaset/common.c",
107786 + [49448].name = "gigaset_initdriver",
107787 + [49448].param2 = 1,
107788 + [49494].file = "drivers/virtio/virtio_ring.c",
107789 + [49494].name = "vring_new_virtqueue",
107790 + [49494].param1 = 1,
107791 + [49663].file = "drivers/media/video/uvc/uvc_driver.c",
107792 + [49663].name = "uvc_simplify_fraction",
107793 + [49663].param3 = 1,
107794 + [49780].file = "net/mac80211/key.c",
107795 + [49780].name = "ieee80211_key_alloc",
107796 + [49780].param3 = 1,
107797 + [49805].file = "drivers/pci/pci.c",
107798 + [49805].name = "pci_add_cap_save_buffer",
107799 + [49805].param3 = 1,
107800 + [49945].file = "drivers/ieee1394/hosts.c",
107801 + [49945].name = "hpsb_alloc_host",
107802 + [49945].param2 = 1,
107803 + [50001].file = "sound/pci/ctxfi/ctresource.c",
107804 + [50001].name = "rsc_mgr_init",
107805 + [50001].param3 = 1,
107806 + [50022].file = "drivers/usb/storage/shuttle_usbat.c",
107807 + [50022].name = "usbat_flash_read_data",
107808 + [50022].param4 = 1,
107809 + [50096].file = "drivers/net/wireless/libertas/debugfs.c",
107810 + [50096].name = "lbs_rdbbp_write",
107811 + [50096].param3 = 1,
107812 + [50102].file = "drivers/telephony/ixj.c",
107813 + [50102].name = "ixj_write",
107814 + [50102].param3 = 1,
107815 + [5052].file = "drivers/char/ppdev.c",
107816 + [5052].name = "pp_read",
107817 + [5052].param3 = 1,
107818 + [50562].file = "drivers/media/video/zoran/zoran_procfs.c",
107819 + [50562].name = "zoran_write",
107820 + [50562].param3 = 1,
107821 + [50692].file = "lib/ts_bm.c",
107822 + [50692].name = "bm_init",
107823 + [50692].param2 = 1,
107824 + [51052].file = "drivers/base/firmware_class.c",
107825 + [51052].name = "firmware_data_write",
107826 + [51052].param5 = 1,
107827 + [51177].file = "net/sunrpc/xprtrdma/transport.c",
107828 + [51177].name = "xprt_rdma_allocate",
107829 + [51177].param2 = 1,
107830 + [51250].file = "fs/read_write.c",
107831 + [51250].name = "rw_copy_check_uvector",
107832 + [51250].param3 = 1,
107833 + [51323].file = "sound/pci/ac97/ac97_pcm.c",
107834 + [51323].name = "snd_ac97_pcm_assign",
107835 + [51323].param2 = 1,
107836 + [51340].file = "drivers/usb/class/usblp.c",
107837 + [51340].name = "usblp_write",
107838 + [51340].param3 = 1,
107839 + [51464].file = "drivers/i2c/i2c-dev.c",
107840 + [51464].name = "i2cdev_write",
107841 + [51464].param3 = 1,
107842 + [51855].file = "net/rds/message.c",
107843 + [51855].name = "rds_message_copy_from_user",
107844 + [51855].param2 = 1,
107845 + [5197].file = "net/core/dev.c",
107846 + [5197].name = "dev_set_alias",
107847 + [5197].param3 = 1,
107848 + [52173].file = "drivers/misc/ibmasm/ibmasmfs.c",
107849 + [52173].name = "remote_settings_file_write",
107850 + [52173].param3 = 1,
107851 + [52201].file = "drivers/video/via/viafbdev.c",
107852 + [52201].name = "viafb_dvp0_proc_write",
107853 + [52201].param3 = 1,
107854 + [5233].file = "include/linux/poll.h",
107855 + [5233].name = "set_fd_set",
107856 + [5233].param1 = 1,
107857 + [52364].file = "sound/core/pcm_lib.c",
107858 + [52364].name = "snd_pcm_lib_readv_transfer",
107859 + [52364].param5 = 1,
107860 + [52589].file = "drivers/xen/xenfs/xenbus.c",
107861 + [52589].name = "xenbus_file_read",
107862 + [52589].param3 = 1,
107863 + [52699].file = "lib/ts_fsm.c",
107864 + [52699].name = "fsm_init",
107865 + [52699].param2 = 1,
107866 + [5313].file = "fs/gfs2/quota.c",
107867 + [5313].name = "do_sync",
107868 + [5313].param1 = 1,
107869 + [5344].file = "security/selinux/ss/hashtab.c",
107870 + [5344].name = "hashtab_create",
107871 + [5344].param3 = 1,
107872 + [53626].file = "drivers/block/paride/pg.c",
107873 + [53626].name = "pg_read",
107874 + [53626].param3 = 1,
107875 + [53644].file = "net/mac80211/rc80211_minstrel_debugfs.c",
107876 + [53644].name = "minstrel_stats_read",
107877 + [53644].param3 = 1,
107878 + [5389].file = "drivers/infiniband/core/uverbs_cmd.c",
107879 + [5389].name = "ib_uverbs_unmarshall_recv",
107880 + [5389].param5 = 1,
107881 + [53901].file = "net/rds/message.c",
107882 + [53901].name = "rds_message_alloc",
107883 + [53901].param1 = 1,
107884 + [54298].file = "drivers/usb/wusbcore/crypto.c",
107885 + [54298].name = "wusb_ccm_mac",
107886 + [54298].param7 = 1,
107887 + [54335].file = "drivers/md/dm-table.c",
107888 + [54335].name = "dm_vcalloc",
107889 + [54335].param2 = 1,
107890 + [54427].file = "drivers/usb/storage/jumpshot.c",
107891 + [54427].name = "jumpshot_write_data",
107892 + [54427].param4 = 1,
107893 + [54467].file = "net/packet/af_packet.c",
107894 + [54467].name = "packet_setsockopt",
107895 + [54467].param5 = 1,
107896 + [54643].file = "drivers/isdn/hardware/eicon/divasi.c",
107897 + [54643].name = "um_idi_write",
107898 + [54643].param3 = 1,
107899 + [54657].file = "mm/migrate.c",
107900 + [54657].name = "do_pages_stat",
107901 + [54657].param2 = 1,
107902 + [54663].file = "drivers/isdn/hardware/eicon/platform.h",
107903 + [54663].name = "diva_os_malloc",
107904 + [54663].param2 = 1,
107905 + [54780].file = "drivers/net/wireless/zd1211rw/zd_chip.c",
107906 + [54780].name = "_zd_iowrite32v_locked",
107907 + [54780].param3 = 1,
107908 + [55066].file = "net/ipv6/ipv6_sockglue.c",
107909 + [55066].name = "do_ipv6_setsockopt",
107910 + [55066].param5 = 1,
107911 + [55081].file = "drivers/virtio/virtio_ring.c",
107912 + [55081].name = "vring_add_buf",
107913 + [55081].param4 = 1,
107914 + [55105].file = "drivers/base/devres.c",
107915 + [55105].name = "devres_alloc",
107916 + [55105].param2 = 1,
107917 + [55155].file = "net/bluetooth/rfcomm/sock.c",
107918 + [55155].name = "rfcomm_sock_setsockopt",
107919 + [55155].param5 = 1,
107920 + [55608].file = "net/sctp/socket.c",
107921 + [55608].name = "sctp_setsockopt_auth_key",
107922 + [55608].param3 = 1,
107923 + [56471].file = "include/linux/slab.h",
107924 + [56471].name = "kcalloc",
107925 + [56471].param1 = 1,
107926 + [56471].param2 = 1,
107927 + [5661].file = "lib/dma-debug.c",
107928 + [5661].name = "filter_write",
107929 + [5661].param3 = 1,
107930 + [57471].file = "drivers/media/video/sn9c102/sn9c102_core.c",
107931 + [57471].name = "sn9c102_read",
107932 + [57471].param3 = 1,
107933 + [57670].file = "drivers/bluetooth/btmrvl_debugfs.c",
107934 + [57670].name = "btmrvl_pscmd_write",
107935 + [57670].param3 = 1,
107936 + [57724].file = "net/bluetooth/hci_sock.c",
107937 + [57724].name = "hci_sock_setsockopt",
107938 + [57724].param5 = 1,
107939 + [58043].file = "kernel/auditfilter.c",
107940 + [58043].name = "audit_unpack_string",
107941 + [58043].param3 = 1,
107942 + [58107].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
107943 + [58107].name = "iwl_dbgfs_sleep_level_override_write",
107944 + [58107].param3 = 1,
107945 + [58263].file = "security/keys/keyring.c",
107946 + [58263].name = "keyring_read",
107947 + [58263].param3 = 1,
107948 + [58278].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
107949 + [58278].name = "iwl_dbgfs_log_event_write",
107950 + [58278].param3 = 1,
107951 + [5827].file = "drivers/net/wireless/ray_cs.c",
107952 + [5827].name = "write_essid",
107953 + [5827].param3 = 1,
107954 + [58769].file = "drivers/net/wireless/zd1211rw/zd_usb.c",
107955 + [58769].name = "zd_usb_read_fw",
107956 + [58769].param4 = 1,
107957 + [58878].file = "drivers/net/wireless/libertas/debugfs.c",
107958 + [58878].name = "lbs_wrbbp_write",
107959 + [58878].param3 = 1,
107960 + [58888].file = "fs/xattr.c",
107961 + [58888].name = "listxattr",
107962 + [58888].param3 = 1,
107963 + [58918].file = "sound/core/pcm_native.c",
107964 + [58918].name = "snd_pcm_aio_write",
107965 + [58918].param3 = 1,
107966 + [58919].file = "net/netlabel/netlabel_unlabeled.c",
107967 + [58919].name = "netlbl_unlabel_init",
107968 + [58919].param1 = 1,
107969 + [58942].file = "drivers/block/aoe/aoedev.c",
107970 + [58942].name = "aoedev_flush",
107971 + [58942].param2 = 1,
107972 + [59270].file = "net/tipc/socket.c",
107973 + [59270].name = "recv_stream",
107974 + [59270].param4 = 1,
107975 + [59639].file = "drivers/media/video/stv680.c",
107976 + [59639].name = "stv680_read",
107977 + [59639].param3 = 1,
107978 + [5968].file = "net/sunrpc/sched.c",
107979 + [5968].name = "rpc_malloc",
107980 + [5968].param2 = 1,
107981 + [59794].file = "mm/mincore.c",
107982 + [59794].name = "sys_mincore",
107983 + [59794].param2 = 1,
107984 + [59856].file = "drivers/base/devres.c",
107985 + [59856].name = "devm_kzalloc",
107986 + [59856].param2 = 1,
107987 + [59877].file = "sound/pci/rme9652/hdspm.c",
107988 + [59877].name = "snd_hdspm_capture_copy",
107989 + [59877].param5 = 1,
107990 + [59991].file = "drivers/media/video/uvc/uvc_queue.c",
107991 + [59991].name = "uvc_alloc_buffers",
107992 + [59991].param2 = 1,
107993 + [60005].file = "fs/namei.c",
107994 + [60005].name = "getname",
107995 + [60005].param1 = 1,
107996 + [60045].file = "drivers/net/usb/mcs7830.c",
107997 + [60045].name = "mcs7830_set_reg",
107998 + [60045].param3 = 1,
107999 + [60198].file = "fs/nfs/nfs4proc.c",
108000 + [60198].name = "nfs4_write_cached_acl",
108001 + [60198].param3 = 1,
108002 + [60331].file = "fs/squashfs/fragment.c",
108003 + [60331].name = "squashfs_read_fragment_index_table",
108004 + [60331].param3 = 1,
108005 + [60391].file = "drivers/ieee1394/raw1394.c",
108006 + [60391].name = "fcp_request",
108007 + [60391].param6 = 1,
108008 + [60651].file = "drivers/ide/ide-proc.c",
108009 + [60651].name = "ide_driver_proc_write",
108010 + [60651].param3 = 1,
108011 + [60683].file = "sound/drivers/opl4/opl4_proc.c",
108012 + [60683].name = "snd_opl4_mem_proc_write",
108013 + [60683].param5 = 1,
108014 + [60693].file = "drivers/misc/hpilo.c",
108015 + [60693].name = "ilo_read",
108016 + [60693].param3 = 1,
108017 + [60744].file = "sound/pci/emu10k1/emuproc.c",
108018 + [60744].name = "snd_emu10k1_fx8010_read",
108019 + [60744].param5 = 1,
108020 + [60878].file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
108021 + [60878].name = "rt2x00debug_read_queue_dump",
108022 + [60878].param3 = 1,
108023 + [61058].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
108024 + [61058].name = "iwl_dbgfs_disable_ht40_write",
108025 + [61058].param3 = 1,
108026 + [61552].file = "drivers/input/evdev.c",
108027 + [61552].name = "str_to_user",
108028 + [61552].param2 = 1,
108029 + [61770].file = "drivers/media/video/et61x251/et61x251_core.c",
108030 + [61770].name = "et61x251_read",
108031 + [61770].param3 = 1,
108032 + [62081].file = "drivers/net/irda/vlsi_ir.c",
108033 + [62081].name = "vlsi_alloc_ring",
108034 + [62081].param3 = 1,
108035 + [62378].file = "net/ipv4/tcp.c",
108036 + [62378].name = "do_tcp_setsockopt",
108037 + [62378].param5 = 1,
108038 + [62525].file = "net/mac80211/debugfs.c",
108039 + [62525].name = "tsf_write",
108040 + [62525].param3 = 1,
108041 + [62744].file = "drivers/char/mem.c",
108042 + [62744].name = "kmsg_write",
108043 + [62744].param3 = 1,
108044 + [62970].file = "net/sched/sch_api.c",
108045 + [62970].name = "qdisc_class_hash_alloc",
108046 + [62970].param1 = 1,
108047 + [63004].file = "drivers/usb/storage/datafab.c",
108048 + [63004].name = "datafab_write_data",
108049 + [63004].param4 = 1,
108050 + [63007].file = "fs/proc/base.c",
108051 + [63007].name = "proc_coredump_filter_write",
108052 + [63007].param3 = 1,
108053 + [63091].file = "drivers/net/usb/pegasus.c",
108054 + [63091].name = "get_registers",
108055 + [63091].param3 = 1,
108056 + [63169].file = "drivers/scsi/sg.c",
108057 + [63169].name = "sg_read",
108058 + [63169].param3 = 1,
108059 + [63489].file = "drivers/bluetooth/btmrvl_debugfs.c",
108060 + [63489].name = "btmrvl_hscfgcmd_write",
108061 + [63489].param3 = 1,
108062 + [63605].file = "mm/mempool.c",
108063 + [63605].name = "mempool_kmalloc",
108064 + [63605].param2 = 1,
108065 + [63765].file = "fs/seq_file.c",
108066 + [63765].name = "seq_read",
108067 + [63765].param3 = 1,
108068 + [64392].file = "drivers/mmc/core/mmc_ops.c",
108069 + [64392].name = "mmc_send_cxd_data",
108070 + [64392].param5 = 1,
108071 + [64471].file = "drivers/bluetooth/btmrvl_debugfs.c",
108072 + [64471].name = "btmrvl_hscmd_write",
108073 + [64471].param3 = 1,
108074 + [64743].file = "fs/ocfs2/dlm/dlmfs.c",
108075 + [64743].name = "dlmfs_file_read",
108076 + [64743].param3 = 1,
108077 + [65087].file = "drivers/net/usb/asix.c",
108078 + [65087].name = "asix_write_cmd",
108079 + [65087].param5 = 1,
108080 + [65098].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
108081 + [65098].name = "iwl_dbgfs_traffic_log_write",
108082 + [65098].param3 = 1,
108083 + [65195].file = "fs/jffs2/xattr.c",
108084 + [65195].name = "do_jffs2_setxattr",
108085 + [65195].param5 = 1,
108086 + [65207].file = "drivers/media/video/cpia.c",
108087 + [65207].name = "cpia_write_proc",
108088 + [65207].param3 = 1,
108089 + [65364].file = "sound/core/pcm_lib.c",
108090 + [65364].name = "snd_pcm_lib_read_transfer",
108091 + [65364].param5 = 1,
108092 + [65409].file = "net/802/garp.c",
108093 + [65409].name = "garp_request_join",
108094 + [65409].param4 = 1,
108095 + [65514].file = "drivers/media/video/gspca/t613.c",
108096 + [65514].name = "reg_w_ixbuf",
108097 + [65514].param4 = 1,
108098 + [6691].file = "drivers/acpi/proc.c",
108099 + [6691].name = "acpi_system_write_wakeup_device",
108100 + [6691].param3 = 1,
108101 + [680].file = "drivers/misc/ibmasm/ibmasmfs.c",
108102 + [680].name = "command_file_read",
108103 + [680].param3 = 1,
108104 + [6867].file = "fs/coda/psdev.c",
108105 + [6867].name = "coda_psdev_read",
108106 + [6867].param3 = 1,
108107 + [6891].file = "drivers/bluetooth/btmrvl_debugfs.c",
108108 + [6891].name = "btmrvl_gpiogap_write",
108109 + [6891].param3 = 1,
108110 + [720].file = "sound/pci/rme9652/hdsp.c",
108111 + [720].name = "snd_hdsp_playback_copy",
108112 + [720].param5 = 1,
108113 + [7488].file = "security/keys/user_defined.c",
108114 + [7488].name = "user_read",
108115 + [7488].param3 = 1,
108116 + [7664].file = "drivers/hid/hid-core.c",
108117 + [7664].name = "hid_parse_report",
108118 + [7664].param3 = 1,
108119 + [7810].file = "fs/squashfs/export.c",
108120 + [7810].name = "squashfs_read_inode_lookup_table",
108121 + [7810].param3 = 1,
108122 + [7958].file = "drivers/gpu/vga/vgaarb.c",
108123 + [7958].name = "vga_arb_write",
108124 + [7958].param3 = 1,
108125 + [7976].file = "drivers/usb/gadget/rndis.c",
108126 + [7976].name = "rndis_add_response",
108127 + [7976].param2 = 1,
108128 + [8285].file = "net/ipv4/tcp.c",
108129 + [8285].name = "tcp_setsockopt",
108130 + [8285].param5 = 1,
108131 + [8334].file = "drivers/scsi/sg.c",
108132 + [8334].name = "sg_proc_write_adio",
108133 + [8334].param3 = 1,
108134 + [8481].file = "drivers/isdn/i4l/isdn_common.c",
108135 + [8481].name = "isdn_write",
108136 + [8481].param3 = 1,
108137 + [8536].file = "fs/cifs/dns_resolve.c",
108138 + [8536].name = "dns_resolve_server_name_to_ip",
108139 + [8536].param1 = 1,
108140 + [8699].file = "fs/nfs/idmap.c",
108141 + [8699].name = "idmap_pipe_upcall",
108142 + [8699].param4 = 1,
108143 + [8764].file = "drivers/usb/core/devio.c",
108144 + [8764].name = "usbdev_read",
108145 + [8764].param3 = 1,
108146 + [8917].file = "net/ipv4/raw.c",
108147 + [8917].name = "raw_setsockopt",
108148 + [8917].param5 = 1,
108149 + [9463].file = "drivers/infiniband/hw/ipath/ipath_verbs.c",
108150 + [9463].name = "ipath_verbs_send",
108151 + [9463].param3 = 1,
108152 + [9463].param5 = 1,
108153 + [9702].file = "drivers/pcmcia/pcmcia_ioctl.c",
108154 + [9702].name = "ds_ioctl",
108155 + [9702].param3 = 1,
108156 + [9828].file = "drivers/media/dvb/dvb-core/dmxdev.c",
108157 + [9828].name = "dvb_demux_do_ioctl",
108158 + [9828].param4 = 1,
108159 + [9962].file = "drivers/scsi/sg.c",
108160 + [9962].name = "sg_proc_write_dressz",
108161 + [9962].param3 = 1,
108162 + [31291].collision = 1,
108163 + [38314].collision = 1,
108164 +};
108165 diff --git a/tools/gcc/size_overflow_hash2.h b/tools/gcc/size_overflow_hash2.h
108166 new file mode 100644
108167 index 0000000..7176f29
108168 --- /dev/null
108169 +++ b/tools/gcc/size_overflow_hash2.h
108170 @@ -0,0 +1,14 @@
108171 +struct size_overflow_hash size_overflow_hash2[65536] = {
108172 + [39105].file = "drivers/gpu/drm/ttm/ttm_tt.c",
108173 + [39105].name = "ttm_tt_create",
108174 + [39105].param2 = 1,
108175 + [43208].file = "fs/nfs/read.c",
108176 + [43208].name = "nfs_readdata_alloc",
108177 + [43208].param1 = 1,
108178 + [46911].file = "drivers/media/video/ivtv/ivtv-fileops.c",
108179 + [46911].name = "ivtv_v4l2_read",
108180 + [46911].param3 = 1,
108181 + [52857].file = "sound/pci/rme9652/rme9652.c",
108182 + [52857].name = "snd_rme9652_capture_copy",
108183 + [52857].param5 = 1,
108184 +};
108185 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
108186 new file mode 100644
108187 index 0000000..a9ae886
108188 --- /dev/null
108189 +++ b/tools/gcc/size_overflow_plugin.c
108190 @@ -0,0 +1,1042 @@
108191 +/*
108192 + * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
108193 + * Licensed under the GPL v2, or (at your option) v3
108194 + *
108195 + * Homepage:
108196 + * http://www.grsecurity.net/~ephox/overflow_plugin/
108197 + *
108198 + * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
108199 + * with double integer precision (DImode/TImode for 32/64 bit integer types).
108200 + * The recomputed argument is checked against INT_MAX and an event is logged on overflow and the triggering process is killed.
108201 + *
108202 + * Usage:
108203 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o size_overflow_plugin.so size_overflow_plugin.c
108204 + * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
108205 + */
108206 +
108207 +#include "gcc-plugin.h"
108208 +#include "config.h"
108209 +#include "system.h"
108210 +#include "coretypes.h"
108211 +#include "tree.h"
108212 +#include "tree-pass.h"
108213 +#include "intl.h"
108214 +#include "plugin-version.h"
108215 +#include "tm.h"
108216 +#include "toplev.h"
108217 +#include "function.h"
108218 +#include "tree-flow.h"
108219 +#include "plugin.h"
108220 +#include "gimple.h"
108221 +#include "c-common.h"
108222 +#include "diagnostic.h"
108223 +
108224 +struct size_overflow_hash {
108225 + const char *name;
108226 + const char *file;
108227 + unsigned short collision:1;
108228 + unsigned short param1:1;
108229 + unsigned short param2:1;
108230 + unsigned short param3:1;
108231 + unsigned short param4:1;
108232 + unsigned short param5:1;
108233 + unsigned short param6:1;
108234 + unsigned short param7:1;
108235 + unsigned short param8:1;
108236 + unsigned short param9:1;
108237 +};
108238 +
108239 +#include "size_overflow_hash1.h"
108240 +#include "size_overflow_hash2.h"
108241 +
108242 +#define __unused __attribute__((__unused__))
108243 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
108244 +#define BEFORE_STMT true
108245 +#define AFTER_STMT false
108246 +#define CREATE_NEW_VAR NULL_TREE
108247 +
108248 +int plugin_is_GPL_compatible;
108249 +void debug_gimple_stmt (gimple gs);
108250 +
108251 +static tree expand(struct pointer_set_t *visited, tree var);
108252 +static tree signed_size_overflow_type;
108253 +static tree unsigned_size_overflow_type;
108254 +static tree report_size_overflow_decl;
108255 +static tree const_char_ptr_type_node;
108256 +static unsigned int handle_function(void);
108257 +
108258 +static struct plugin_info size_overflow_plugin_info = {
108259 + .version = "20120311beta",
108260 + .help = "no-size_overflow\tturn off size overflow checking\n",
108261 +};
108262 +
108263 +static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
108264 +{
108265 + unsigned int arg_count = type_num_arguments(*node);
108266 +
108267 + for (; args; args = TREE_CHAIN(args)) {
108268 + tree position = TREE_VALUE(args);
108269 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) {
108270 + error("handle_size_overflow_attribute: overflow parameter outside range.");
108271 + *no_add_attrs = true;
108272 + }
108273 + }
108274 + return NULL_TREE;
108275 +}
108276 +
108277 +static struct attribute_spec no_size_overflow_attr = {
108278 + .name = "size_overflow",
108279 + .min_length = 1,
108280 + .max_length = -1,
108281 + .decl_required = false,
108282 + .type_required = true,
108283 + .function_type_required = true,
108284 + .handler = handle_size_overflow_attribute
108285 +};
108286 +
108287 +static void register_attributes(void __unused *event_data, void __unused *data)
108288 +{
108289 + register_attribute(&no_size_overflow_attr);
108290 +}
108291 +
108292 +// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
108293 +static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
108294 +{
108295 +#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
108296 +#define cwmixa( in ) { cwfold( in, m, k, h ); }
108297 +#define cwmixb( in ) { cwfold( in, n, h, k ); }
108298 +
108299 + const unsigned int m = 0x57559429;
108300 + const unsigned int n = 0x5052acdb;
108301 + const unsigned int *key4 = (const unsigned int *)key;
108302 + unsigned int h = len;
108303 + unsigned int k = len + seed + n;
108304 + unsigned long long p;
108305 +
108306 + while (len >= 8) {
108307 + cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
108308 + len -= 8;
108309 + }
108310 + if (len >= 4) {
108311 + cwmixb(key4[0]) key4 += 1;
108312 + len -= 4;
108313 + }
108314 + if (len)
108315 + cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
108316 + cwmixb(h ^ (k + n));
108317 + return k ^ h;
108318 +
108319 +#undef cwfold
108320 +#undef cwmixa
108321 +#undef cwmixb
108322 +}
108323 +
108324 +static inline unsigned int size_overflow_hash(const char *fndecl, unsigned int seed)
108325 +{
108326 + return CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
108327 +}
108328 +
108329 +static inline tree get_original_function_decl(tree fndecl)
108330 +{
108331 + if (DECL_ABSTRACT_ORIGIN(fndecl))
108332 + return DECL_ABSTRACT_ORIGIN(fndecl);
108333 + return fndecl;
108334 +}
108335 +
108336 +static inline gimple get_def_stmt(tree node)
108337 +{
108338 + gcc_assert(TREE_CODE(node) == SSA_NAME);
108339 + return SSA_NAME_DEF_STMT(node);
108340 +}
108341 +
108342 +static struct size_overflow_hash *get_function_hash(tree fndecl)
108343 +{
108344 + unsigned int hash;
108345 + const char *func = NAME(fndecl);
108346 +
108347 + hash = size_overflow_hash(func, 0);
108348 +
108349 + if (size_overflow_hash1[hash].collision) {
108350 + hash = size_overflow_hash(func, 23432);
108351 + return &size_overflow_hash2[hash];
108352 + }
108353 + return &size_overflow_hash1[hash];
108354 +}
108355 +
108356 +static void check_missing_attribute(tree arg)
108357 +{
108358 + tree var, func = get_original_function_decl(current_function_decl);
108359 + const char *curfunc = NAME(func);
108360 + unsigned int new_hash, argnum = 1;
108361 + struct size_overflow_hash *hash;
108362 + location_t loc;
108363 + expanded_location xloc;
108364 + bool match = false;
108365 +
108366 + loc = DECL_SOURCE_LOCATION(func);
108367 + xloc = expand_location(loc);
108368 +
108369 + if (lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(func))))
108370 + return;
108371 +
108372 + hash = get_function_hash(func);
108373 + if (hash->name && !strcmp(hash->name, NAME(func)) && !strcmp(hash->file, xloc.file))
108374 + return;
108375 +
108376 + gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
108377 +
108378 + if (TREE_CODE(arg) == SSA_NAME)
108379 + arg = SSA_NAME_VAR(arg);
108380 +
108381 + for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) {
108382 + if (strcmp(NAME(arg), NAME(var))) {
108383 + argnum++;
108384 + continue;
108385 + }
108386 + match = true;
108387 + if (!TYPE_UNSIGNED(TREE_TYPE(var)))
108388 + return;
108389 + break;
108390 + }
108391 + if (!match) {
108392 + warning(0, "check_missing_attribute: cannot find the %s argument in %s", NAME(arg), NAME(func));
108393 + return;
108394 + }
108395 +
108396 +#define check_param(num) \
108397 + if (num == argnum && hash->param##num) \
108398 + return;
108399 + check_param(1);
108400 + check_param(2);
108401 + check_param(3);
108402 + check_param(4);
108403 + check_param(5);
108404 + check_param(6);
108405 + check_param(7);
108406 + check_param(8);
108407 + check_param(9);
108408 +#undef check_param
108409 +
108410 + new_hash = size_overflow_hash(curfunc, 0);
108411 + inform(loc, "Function %s is missing from the size_overflow hash table +%s+%d+%u+%s", curfunc, curfunc, argnum, new_hash, xloc.file);
108412 +}
108413 +
108414 +static tree create_new_var(tree type)
108415 +{
108416 + tree new_var = create_tmp_var(type, "cicus");
108417 +
108418 + add_referenced_var(new_var);
108419 + mark_sym_for_renaming(new_var);
108420 + return new_var;
108421 +}
108422 +
108423 +static bool is_bool(tree node)
108424 +{
108425 + tree type;
108426 +
108427 + if (node == NULL_TREE)
108428 + return false;
108429 +
108430 + type = TREE_TYPE(node);
108431 + if (!INTEGRAL_TYPE_P(type))
108432 + return false;
108433 + if (TREE_CODE(type) == BOOLEAN_TYPE)
108434 + return true;
108435 + if (TYPE_PRECISION(type) == 1)
108436 + return true;
108437 + return false;
108438 +}
108439 +
108440 +static gimple build_cast_stmt(tree type, tree var, tree new_var, location_t loc)
108441 +{
108442 + gimple assign;
108443 +
108444 + if (new_var == CREATE_NEW_VAR)
108445 + new_var = create_new_var(type);
108446 +
108447 + assign = gimple_build_assign(new_var, fold_convert(type, var));
108448 + gimple_set_location(assign, loc);
108449 + gimple_set_lhs(assign, make_ssa_name(new_var, assign));
108450 +
108451 + return assign;
108452 +}
108453 +
108454 +static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before)
108455 +{
108456 + tree oldstmt_rhs1;
108457 + enum tree_code code;
108458 + gimple stmt;
108459 + gimple_stmt_iterator gsi;
108460 +
108461 + if (is_bool(rhs1)) {
108462 + pointer_set_insert(visited, oldstmt);
108463 + return gimple_get_lhs(oldstmt);
108464 + }
108465 +
108466 + if (rhs1 == NULL_TREE) {
108467 + debug_gimple_stmt(oldstmt);
108468 + error("create_assign: rhs1 is NULL_TREE");
108469 + gcc_unreachable();
108470 + }
108471 +
108472 + oldstmt_rhs1 = gimple_assign_rhs1(oldstmt);
108473 + code = TREE_CODE(oldstmt_rhs1);
108474 + if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP))
108475 + check_missing_attribute(oldstmt_rhs1);
108476 +
108477 + stmt = build_cast_stmt(signed_size_overflow_type, rhs1, CREATE_NEW_VAR, gimple_location(oldstmt));
108478 + gsi = gsi_for_stmt(oldstmt);
108479 + if (before)
108480 + gsi_insert_before(&gsi, stmt, GSI_NEW_STMT);
108481 + else
108482 + gsi_insert_after(&gsi, stmt, GSI_NEW_STMT);
108483 + update_stmt(stmt);
108484 + pointer_set_insert(visited, oldstmt);
108485 + return gimple_get_lhs(stmt);
108486 +}
108487 +
108488 +static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, tree rhs2, tree __unused rhs3)
108489 +{
108490 + tree new_var, lhs = gimple_get_lhs(oldstmt);
108491 + gimple stmt;
108492 + gimple_stmt_iterator gsi;
108493 +
108494 + if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
108495 + rhs1 = gimple_assign_rhs1(oldstmt);
108496 + rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT);
108497 + }
108498 + if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
108499 + rhs2 = gimple_assign_rhs2(oldstmt);
108500 + rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT);
108501 + }
108502 +
108503 + stmt = gimple_copy(oldstmt);
108504 + gimple_set_location(stmt, gimple_location(oldstmt));
108505 +
108506 + if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
108507 + gimple_assign_set_rhs_code(stmt, MULT_EXPR);
108508 +
108509 + if (is_bool(lhs))
108510 + new_var = SSA_NAME_VAR(lhs);
108511 + else
108512 + new_var = create_new_var(signed_size_overflow_type);
108513 + new_var = make_ssa_name(new_var, stmt);
108514 + gimple_set_lhs(stmt, new_var);
108515 +
108516 + if (rhs1 != NULL_TREE) {
108517 + if (!gimple_assign_cast_p(oldstmt))
108518 + rhs1 = fold_convert(signed_size_overflow_type, rhs1);
108519 + gimple_assign_set_rhs1(stmt, rhs1);
108520 + }
108521 +
108522 + if (rhs2 != NULL_TREE)
108523 + gimple_assign_set_rhs2(stmt, rhs2);
108524 +#if BUILDING_GCC_VERSION >= 4007
108525 + if (rhs3 != NULL_TREE)
108526 + gimple_assign_set_rhs3(stmt, rhs3);
108527 +#endif
108528 + gimple_set_vuse(stmt, gimple_vuse(oldstmt));
108529 + gimple_set_vdef(stmt, gimple_vdef(oldstmt));
108530 +
108531 + gsi = gsi_for_stmt(oldstmt);
108532 + gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
108533 + update_stmt(stmt);
108534 + pointer_set_insert(visited, oldstmt);
108535 + return gimple_get_lhs(stmt);
108536 +}
108537 +
108538 +static gimple overflow_create_phi_node(gimple oldstmt, tree var)
108539 +{
108540 + basic_block bb;
108541 + gimple phi;
108542 + gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
108543 +
108544 + bb = gsi_bb(gsi);
108545 + phi = make_phi_node(var, EDGE_COUNT(bb->preds));
108546 +
108547 + gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
108548 + gimple_set_bb(phi, bb);
108549 + return phi;
108550 +}
108551 +
108552 +static tree signed_cast_constant(tree node)
108553 +{
108554 + gcc_assert(is_gimple_constant(node));
108555 +
108556 + if (TYPE_PRECISION(signed_size_overflow_type) == TYPE_PRECISION(TREE_TYPE(node)))
108557 + return build_int_cst_wide(signed_size_overflow_type, TREE_INT_CST_LOW(node), TREE_INT_CST_HIGH(node));
108558 + else
108559 + return build_int_cst(signed_size_overflow_type, int_cst_value(node));
108560 +}
108561 +
108562 +static gimple cast_old_phi_arg(gimple oldstmt, tree arg, tree new_var)
108563 +{
108564 + basic_block first_bb;
108565 + gimple newstmt;
108566 + gimple_stmt_iterator gsi;
108567 +
108568 + newstmt = build_cast_stmt(signed_size_overflow_type, arg, new_var, gimple_location(oldstmt));
108569 +
108570 + first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
108571 + if (dom_info_available_p(CDI_DOMINATORS))
108572 + set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
108573 + gsi = gsi_start_bb(first_bb);
108574 +
108575 + gsi_insert_before(&gsi, newstmt, GSI_NEW_STMT);
108576 + return newstmt;
108577 +}
108578 +
108579 +static gimple handle_new_phi_arg(tree arg, tree new_var, tree new_rhs)
108580 +{
108581 + gimple newstmt;
108582 + gimple_stmt_iterator gsi;
108583 + void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update);
108584 + gimple def_newstmt = get_def_stmt(new_rhs);
108585 +
108586 + gsi_insert = gsi_insert_after;
108587 + gsi = gsi_for_stmt(def_newstmt);
108588 +
108589 + switch (gimple_code(get_def_stmt(arg))) {
108590 + case GIMPLE_PHI:
108591 + newstmt = gimple_build_assign(new_var, new_rhs);
108592 + gsi = gsi_after_labels(gimple_bb(def_newstmt));
108593 + gsi_insert = gsi_insert_before;
108594 + break;
108595 + case GIMPLE_ASM:
108596 + case GIMPLE_CALL:
108597 + newstmt = gimple_build_assign(new_var, new_rhs);
108598 + break;
108599 + case GIMPLE_ASSIGN:
108600 + newstmt = gimple_copy(def_newstmt);
108601 + break;
108602 + default:
108603 + /* unknown gimple_code (build_new_phi_arg) */
108604 + gcc_unreachable();
108605 + }
108606 +
108607 + gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
108608 + gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
108609 + return newstmt;
108610 +}
108611 +
108612 +static tree build_new_phi_arg(struct pointer_set_t *visited, gimple oldstmt, tree arg, tree new_var)
108613 +{
108614 + gimple newstmt;
108615 + tree new_rhs;
108616 +
108617 + if (is_gimple_constant(arg))
108618 + return signed_cast_constant(arg);
108619 +
108620 + pointer_set_insert(visited, oldstmt);
108621 + new_rhs = expand(visited, arg);
108622 + if (new_rhs == NULL_TREE) {
108623 + gcc_assert(TREE_CODE(TREE_TYPE(arg)) != VOID_TYPE);
108624 + newstmt = cast_old_phi_arg(oldstmt, arg, new_var);
108625 + } else
108626 + newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
108627 + update_stmt(newstmt);
108628 + return gimple_get_lhs(newstmt);
108629 +}
108630 +
108631 +static tree build_new_phi(struct pointer_set_t *visited, gimple oldstmt)
108632 +{
108633 + gimple phi;
108634 + tree new_var = create_new_var(signed_size_overflow_type);
108635 + unsigned int i, n = gimple_phi_num_args(oldstmt);
108636 +
108637 + phi = overflow_create_phi_node(oldstmt, new_var);
108638 +
108639 + for (i = 0; i < n; i++) {
108640 + tree arg, lhs;
108641 +
108642 + arg = gimple_phi_arg_def(oldstmt, i);
108643 + lhs = build_new_phi_arg(visited, oldstmt, arg, new_var);
108644 + add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
108645 + }
108646 + update_stmt(phi);
108647 + return gimple_phi_result(phi);
108648 +}
108649 +
108650 +static tree handle_unary_ops(struct pointer_set_t *visited, tree var)
108651 +{
108652 + gimple def_stmt = get_def_stmt(var);
108653 + tree new_rhs1, rhs1 = gimple_assign_rhs1(def_stmt);
108654 +
108655 + if (is_gimple_constant(rhs1))
108656 + return dup_assign(visited, def_stmt, signed_cast_constant(rhs1), NULL_TREE, NULL_TREE);
108657 +
108658 + switch (TREE_CODE(rhs1)) {
108659 + case SSA_NAME:
108660 + new_rhs1 = expand(visited, rhs1);
108661 + break;
108662 + case ARRAY_REF:
108663 + case ADDR_EXPR:
108664 + case COMPONENT_REF:
108665 + case COND_EXPR:
108666 + case INDIRECT_REF:
108667 +#if BUILDING_GCC_VERSION >= 4006
108668 + case MEM_REF:
108669 +#endif
108670 + case PARM_DECL:
108671 + case TARGET_MEM_REF:
108672 + case VAR_DECL:
108673 + return create_assign(visited, def_stmt, var, AFTER_STMT);
108674 + default:
108675 + debug_gimple_stmt(def_stmt);
108676 + debug_tree(rhs1);
108677 + gcc_unreachable();
108678 + }
108679 +
108680 + if (new_rhs1 == NULL_TREE)
108681 + return create_assign(visited, def_stmt, rhs1, AFTER_STMT);
108682 + return dup_assign(visited, def_stmt, new_rhs1, NULL_TREE, NULL_TREE);
108683 +}
108684 +
108685 +static tree transform_mult_overflow(tree rhs, tree const_rhs, tree log2const_rhs, location_t loc)
108686 +{
108687 + tree new_def_rhs;
108688 +
108689 + if (!is_gimple_constant(rhs))
108690 + return NULL_TREE;
108691 +
108692 + new_def_rhs = fold_build2_loc(loc, MULT_EXPR, TREE_TYPE(const_rhs), rhs, const_rhs);
108693 + new_def_rhs = signed_cast_constant(new_def_rhs);
108694 + if (int_cst_value(new_def_rhs) >= 0)
108695 + return NULL_TREE;
108696 + return fold_build2_loc(loc, RSHIFT_EXPR, TREE_TYPE(new_def_rhs), new_def_rhs, log2const_rhs);
108697 +}
108698 +
108699 +static tree handle_intentional_mult_overflow(struct pointer_set_t *visited, tree rhs, tree const_rhs)
108700 +{
108701 + gimple new_def_stmt, def_stmt;
108702 + tree def_rhs1, def_rhs2, new_def_rhs;
108703 + location_t loc;
108704 + tree log2const_rhs;
108705 + int log2 = exact_log2(TREE_INT_CST_LOW(const_rhs));
108706 +
108707 + if (log2 == -1) {
108708 +// warning(0, "Possibly unhandled intentional integer truncation");
108709 + return NULL_TREE;
108710 + }
108711 +
108712 + def_stmt = get_def_stmt(rhs);
108713 + loc = gimple_location(def_stmt);
108714 + def_rhs1 = gimple_assign_rhs1(def_stmt);
108715 + def_rhs2 = gimple_assign_rhs2(def_stmt);
108716 + new_def_stmt = get_def_stmt(expand(visited, rhs));
108717 + log2const_rhs = build_int_cstu(TREE_TYPE(const_rhs), log2);
108718 +
108719 + new_def_rhs = transform_mult_overflow(def_rhs1, const_rhs, log2const_rhs, loc);
108720 + if (new_def_rhs != NULL_TREE) {
108721 + gimple_assign_set_rhs1(new_def_stmt, new_def_rhs);
108722 + } else {
108723 + new_def_rhs = transform_mult_overflow(def_rhs2, const_rhs, log2const_rhs, loc);
108724 + if (new_def_rhs != NULL_TREE)
108725 + gimple_assign_set_rhs2(new_def_stmt, new_def_rhs);
108726 + }
108727 + if (new_def_rhs == NULL_TREE)
108728 + return NULL_TREE;
108729 +
108730 + update_stmt(new_def_stmt);
108731 +// warning(0, "Handle integer truncation (gcc optimization)");
108732 + return gimple_get_lhs(new_def_stmt);
108733 +}
108734 +
108735 +static bool is_mult_overflow(gimple def_stmt, tree rhs1)
108736 +{
108737 + gimple rhs1_def_stmt = get_def_stmt(rhs1);
108738 +
108739 + if (gimple_assign_rhs_code(def_stmt) != MULT_EXPR)
108740 + return false;
108741 + if (gimple_code(rhs1_def_stmt) != GIMPLE_ASSIGN)
108742 + return false;
108743 + if (gimple_assign_rhs_code(rhs1_def_stmt) != PLUS_EXPR)
108744 + return false;
108745 + return true;
108746 +}
108747 +
108748 +static tree handle_intentional_overflow(struct pointer_set_t *visited, gimple def_stmt, tree rhs1, tree rhs2)
108749 +{
108750 + if (is_mult_overflow(def_stmt, rhs1))
108751 + return handle_intentional_mult_overflow(visited, rhs1, rhs2);
108752 + return NULL_TREE;
108753 +}
108754 +
108755 +static tree handle_binary_ops(struct pointer_set_t *visited, tree var)
108756 +{
108757 + tree rhs1, rhs2;
108758 + gimple def_stmt = get_def_stmt(var);
108759 + tree new_rhs1 = NULL_TREE;
108760 + tree new_rhs2 = NULL_TREE;
108761 +
108762 + rhs1 = gimple_assign_rhs1(def_stmt);
108763 + rhs2 = gimple_assign_rhs2(def_stmt);
108764 +
108765 + /* no DImode/TImode division in the 32/64 bit kernel */
108766 + switch (gimple_assign_rhs_code(def_stmt)) {
108767 + case RDIV_EXPR:
108768 + case TRUNC_DIV_EXPR:
108769 + case CEIL_DIV_EXPR:
108770 + case FLOOR_DIV_EXPR:
108771 + case ROUND_DIV_EXPR:
108772 + case TRUNC_MOD_EXPR:
108773 + case CEIL_MOD_EXPR:
108774 + case FLOOR_MOD_EXPR:
108775 + case ROUND_MOD_EXPR:
108776 + case EXACT_DIV_EXPR:
108777 + case POINTER_PLUS_EXPR:
108778 + /* logical AND cannot cause an overflow */
108779 + case BIT_AND_EXPR:
108780 + return create_assign(visited, def_stmt, var, AFTER_STMT);
108781 + default:
108782 + break;
108783 + }
108784 +
108785 + if (is_gimple_constant(rhs2)) {
108786 + new_rhs2 = signed_cast_constant(rhs2);
108787 + new_rhs1 = handle_intentional_overflow(visited, def_stmt, rhs1, rhs2);
108788 + }
108789 +
108790 + if (is_gimple_constant(rhs1)) {
108791 + new_rhs1 = signed_cast_constant(rhs1);
108792 + new_rhs2 = handle_intentional_overflow(visited, def_stmt, rhs2, rhs1);
108793 + }
108794 +
108795 + if (new_rhs1 == NULL_TREE && TREE_CODE(rhs1) == SSA_NAME)
108796 + new_rhs1 = expand(visited, rhs1);
108797 + if (new_rhs2 == NULL_TREE && TREE_CODE(rhs2) == SSA_NAME)
108798 + new_rhs2 = expand(visited, rhs2);
108799 +
108800 + return dup_assign(visited, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
108801 +}
108802 +
108803 +#if BUILDING_GCC_VERSION >= 4007
108804 +static tree get_new_rhs(struct pointer_set_t *visited, tree rhs)
108805 +{
108806 + if (is_gimple_constant(rhs))
108807 + return signed_cast_constant(rhs);
108808 + if (TREE_CODE(rhs) != SSA_NAME)
108809 + return NULL_TREE;
108810 + return expand(visited, rhs);
108811 +}
108812 +
108813 +static tree handle_ternary_ops(struct pointer_set_t *visited, tree var)
108814 +{
108815 + tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3;
108816 + gimple def_stmt = get_def_stmt(var);
108817 +
108818 + rhs1 = gimple_assign_rhs1(def_stmt);
108819 + rhs2 = gimple_assign_rhs2(def_stmt);
108820 + rhs3 = gimple_assign_rhs3(def_stmt);
108821 + new_rhs1 = get_new_rhs(visited, rhs1);
108822 + new_rhs2 = get_new_rhs(visited, rhs2);
108823 + new_rhs3 = get_new_rhs(visited, rhs3);
108824 +
108825 + if (new_rhs1 == NULL_TREE && new_rhs2 != NULL_TREE && new_rhs3 != NULL_TREE)
108826 + return dup_assign(visited, def_stmt, new_rhs1, new_rhs2, new_rhs3);
108827 + error("handle_ternary_ops: unknown rhs");
108828 + gcc_unreachable();
108829 +}
108830 +#endif
108831 +
108832 +static void set_size_overflow_type(tree node)
108833 +{
108834 + switch (TYPE_MODE(TREE_TYPE(node))) {
108835 + case SImode:
108836 + signed_size_overflow_type = intDI_type_node;
108837 + unsigned_size_overflow_type = unsigned_intDI_type_node;
108838 + break;
108839 + case DImode:
108840 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) {
108841 + signed_size_overflow_type = intDI_type_node;
108842 + unsigned_size_overflow_type = unsigned_intDI_type_node;
108843 + } else {
108844 + signed_size_overflow_type = intTI_type_node;
108845 + unsigned_size_overflow_type = unsigned_intTI_type_node;
108846 + }
108847 + break;
108848 + default:
108849 + error("set_size_overflow_type: unsupported gcc configuration.");
108850 + gcc_unreachable();
108851 + }
108852 +}
108853 +
108854 +static tree expand_visited(gimple def_stmt)
108855 +{
108856 + gimple tmp;
108857 + gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
108858 +
108859 + gsi_next(&gsi);
108860 + tmp = gsi_stmt(gsi);
108861 + switch (gimple_code(tmp)) {
108862 + case GIMPLE_ASSIGN:
108863 + return gimple_get_lhs(tmp);
108864 + case GIMPLE_PHI:
108865 + return gimple_phi_result(tmp);
108866 + case GIMPLE_CALL:
108867 + return gimple_call_lhs(tmp);
108868 + default:
108869 + return NULL_TREE;
108870 + }
108871 +}
108872 +
108873 +static tree expand(struct pointer_set_t *visited, tree var)
108874 +{
108875 + gimple def_stmt;
108876 +
108877 + if (is_gimple_constant(var))
108878 + return NULL_TREE;
108879 +
108880 + if (TREE_CODE(var) == ADDR_EXPR)
108881 + return NULL_TREE;
108882 +
108883 + if (SSA_NAME_IS_DEFAULT_DEF(var))
108884 + return NULL_TREE;
108885 +
108886 + def_stmt = get_def_stmt(var);
108887 +
108888 + if (!def_stmt)
108889 + return NULL_TREE;
108890 +
108891 + if (pointer_set_contains(visited, def_stmt))
108892 + return expand_visited(def_stmt);
108893 +
108894 + switch (gimple_code(def_stmt)) {
108895 + case GIMPLE_NOP:
108896 + check_missing_attribute(var);
108897 + return NULL_TREE;
108898 + case GIMPLE_PHI:
108899 + return build_new_phi(visited, def_stmt);
108900 + case GIMPLE_CALL:
108901 + case GIMPLE_ASM:
108902 + gcc_assert(TREE_CODE(TREE_TYPE(var)) != VOID_TYPE);
108903 + return create_assign(visited, def_stmt, var, AFTER_STMT);
108904 + case GIMPLE_ASSIGN:
108905 + switch (gimple_num_ops(def_stmt)) {
108906 + case 2:
108907 + return handle_unary_ops(visited, var);
108908 + case 3:
108909 + return handle_binary_ops(visited, var);
108910 +#if BUILDING_GCC_VERSION >= 4007
108911 + case 4:
108912 + return handle_ternary_ops(visited, var);
108913 +#endif
108914 + }
108915 + default:
108916 + debug_gimple_stmt(def_stmt);
108917 + error("expand: unknown gimple code");
108918 + gcc_unreachable();
108919 + }
108920 +}
108921 +
108922 +static void change_function_arg(gimple func_stmt, tree origarg, unsigned int argnum, tree newarg)
108923 +{
108924 + gimple assign, stmt;
108925 + gimple_stmt_iterator gsi = gsi_for_stmt(func_stmt);
108926 + tree origtype = TREE_TYPE(origarg);
108927 +
108928 + stmt = gsi_stmt(gsi);
108929 + gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
108930 +
108931 + assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, gimple_location(stmt));
108932 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
108933 + update_stmt(assign);
108934 +
108935 + gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
108936 + update_stmt(stmt);
108937 +}
108938 +
108939 +static tree get_function_arg(unsigned int argnum, gimple stmt, tree fndecl)
108940 +{
108941 + const char *origid;
108942 + tree arg, origarg;
108943 +
108944 + if (!DECL_ABSTRACT_ORIGIN(fndecl)) {
108945 + gcc_assert(gimple_call_num_args(stmt) > argnum);
108946 + return gimple_call_arg(stmt, argnum);
108947 + }
108948 +
108949 + origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
108950 + while (origarg && argnum) {
108951 + argnum--;
108952 + origarg = TREE_CHAIN(origarg);
108953 + }
108954 +
108955 + gcc_assert(argnum == 0);
108956 +
108957 + gcc_assert(origarg != NULL_TREE);
108958 + origid = NAME(origarg);
108959 + for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) {
108960 + if (!strcmp(origid, NAME(arg)))
108961 + return arg;
108962 + }
108963 + return NULL_TREE;
108964 +}
108965 +
108966 +static void insert_cond(tree arg, basic_block cond_bb)
108967 +{
108968 + gimple cond_stmt;
108969 + gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
108970 +
108971 + cond_stmt = gimple_build_cond(GT_EXPR, arg, build_int_cstu(signed_size_overflow_type, 0x7fffffff), NULL_TREE, NULL_TREE);
108972 + gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
108973 + update_stmt(cond_stmt);
108974 +}
108975 +
108976 +static tree create_string_param(tree string)
108977 +{
108978 + tree array_ref = build4(ARRAY_REF, TREE_TYPE(string), string, integer_zero_node, NULL, NULL);
108979 +
108980 + return build1(ADDR_EXPR, ptr_type_node, array_ref);
108981 +}
108982 +
108983 +static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg)
108984 +{
108985 + gimple func_stmt, def_stmt;
108986 + tree current_func, loc_file, loc_line;
108987 + expanded_location xloc;
108988 + gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
108989 +
108990 + def_stmt = get_def_stmt(arg);
108991 + xloc = expand_location(gimple_location(def_stmt));
108992 +
108993 + if (!gimple_has_location(def_stmt)) {
108994 + xloc = expand_location(gimple_location(stmt));
108995 + gcc_assert(gimple_has_location(stmt));
108996 + }
108997 +
108998 + loc_line = build_int_cstu(unsigned_type_node, xloc.line);
108999 +
109000 + loc_file = build_string(strlen(xloc.file), xloc.file);
109001 + TREE_TYPE(loc_file) = char_array_type_node;
109002 + loc_file = create_string_param(loc_file);
109003 +
109004 + current_func = build_string(IDENTIFIER_LENGTH(DECL_NAME(current_function_decl)), NAME(current_function_decl));
109005 + TREE_TYPE(current_func) = char_array_type_node;
109006 + current_func = create_string_param(current_func);
109007 +
109008 + // void report_size_overflow(const char *file, unsigned int line, const char *func)
109009 + func_stmt = gimple_build_call(report_size_overflow_decl, 3, loc_file, loc_line, current_func);
109010 +
109011 + gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
109012 +}
109013 +
109014 +static void insert_check_size_overflow(gimple stmt, tree arg)
109015 +{
109016 + basic_block cond_bb, join_bb, bb_true;
109017 + edge e;
109018 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
109019 +
109020 + cond_bb = gimple_bb(stmt);
109021 + gsi_prev(&gsi);
109022 + if (gsi_end_p(gsi))
109023 + e = split_block_after_labels(cond_bb);
109024 + else
109025 + e = split_block(cond_bb, gsi_stmt(gsi));
109026 + cond_bb = e->src;
109027 + join_bb = e->dest;
109028 + e->flags = EDGE_FALSE_VALUE;
109029 + e->probability = REG_BR_PROB_BASE;
109030 +
109031 + bb_true = create_empty_bb(cond_bb);
109032 + make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
109033 +
109034 + if (dom_info_available_p(CDI_DOMINATORS)) {
109035 + set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
109036 + set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
109037 + }
109038 +
109039 + insert_cond(arg, cond_bb);
109040 + insert_cond_result(bb_true, stmt, arg);
109041 +}
109042 +
109043 +static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum)
109044 +{
109045 + struct pointer_set_t *visited;
109046 + tree arg, newarg;
109047 + gimple ucast_stmt;
109048 + gimple_stmt_iterator gsi;
109049 + location_t loc = gimple_location(stmt);
109050 +
109051 + arg = get_function_arg(argnum, stmt, fndecl);
109052 + if (arg == NULL_TREE)
109053 + return;
109054 +
109055 + if (is_gimple_constant(arg))
109056 + return;
109057 + if (TREE_CODE(arg) != SSA_NAME)
109058 + return;
109059 +
109060 + set_size_overflow_type(arg);
109061 + visited = pointer_set_create();
109062 + newarg = expand(visited, arg);
109063 + pointer_set_destroy(visited);
109064 +
109065 + if (newarg == NULL_TREE)
109066 + return;
109067 +
109068 + change_function_arg(stmt, arg, argnum, newarg);
109069 +
109070 + ucast_stmt = build_cast_stmt(unsigned_size_overflow_type, newarg, CREATE_NEW_VAR, loc);
109071 + gsi = gsi_for_stmt(stmt);
109072 + gsi_insert_before(&gsi, ucast_stmt, GSI_SAME_STMT);
109073 +
109074 + insert_check_size_overflow(stmt, gimple_get_lhs(ucast_stmt));
109075 +// inform(loc, "Integer size_overflow check applied here.");
109076 +}
109077 +
109078 +static void handle_function_by_attribute(gimple stmt, tree attr, tree fndecl)
109079 +{
109080 + tree p = TREE_VALUE(attr);
109081 + do {
109082 + handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1);
109083 + p = TREE_CHAIN(p);
109084 + } while (p);
109085 +}
109086 +
109087 +static void handle_function_by_hash(gimple stmt, tree fndecl)
109088 +{
109089 + struct size_overflow_hash *hash;
109090 + expanded_location xloc;
109091 +
109092 + hash = get_function_hash(fndecl);
109093 + xloc = expand_location(DECL_SOURCE_LOCATION(fndecl));
109094 +
109095 + fndecl = get_original_function_decl(fndecl);
109096 + if (!hash->name || !hash->file)
109097 + return;
109098 + if (strcmp(hash->name, NAME(fndecl)) || strcmp(hash->file, xloc.file))
109099 + return;
109100 +
109101 +#define search_param(argnum) \
109102 + if (hash->param##argnum) \
109103 + handle_function_arg(stmt, fndecl, argnum - 1);
109104 +
109105 + search_param(1);
109106 + search_param(2);
109107 + search_param(3);
109108 + search_param(4);
109109 + search_param(5);
109110 + search_param(6);
109111 + search_param(7);
109112 + search_param(8);
109113 + search_param(9);
109114 +#undef search_param
109115 +}
109116 +
109117 +static unsigned int handle_function(void)
109118 +{
109119 + basic_block bb = ENTRY_BLOCK_PTR->next_bb;
109120 + int saved_last_basic_block = last_basic_block;
109121 +
109122 + do {
109123 + gimple_stmt_iterator gsi;
109124 + basic_block next = bb->next_bb;
109125 +
109126 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
109127 + tree fndecl, attr;
109128 + gimple stmt = gsi_stmt(gsi);
109129 +
109130 + if (!(is_gimple_call(stmt)))
109131 + continue;
109132 + fndecl = gimple_call_fndecl(stmt);
109133 + if (fndecl == NULL_TREE)
109134 + continue;
109135 + if (gimple_call_num_args(stmt) == 0)
109136 + continue;
109137 + attr = lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(fndecl)));
109138 + if (!attr || !TREE_VALUE(attr))
109139 + handle_function_by_hash(stmt, fndecl);
109140 + else
109141 + handle_function_by_attribute(stmt, attr, fndecl);
109142 + gsi = gsi_for_stmt(stmt);
109143 + }
109144 + bb = next;
109145 + } while (bb && bb->index <= saved_last_basic_block);
109146 + return 0;
109147 +}
109148 +
109149 +static struct gimple_opt_pass size_overflow_pass = {
109150 + .pass = {
109151 + .type = GIMPLE_PASS,
109152 + .name = "size_overflow",
109153 + .gate = NULL,
109154 + .execute = handle_function,
109155 + .sub = NULL,
109156 + .next = NULL,
109157 + .static_pass_number = 0,
109158 + .tv_id = TV_NONE,
109159 + .properties_required = PROP_cfg | PROP_referenced_vars,
109160 + .properties_provided = 0,
109161 + .properties_destroyed = 0,
109162 + .todo_flags_start = 0,
109163 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
109164 + }
109165 +};
109166 +
109167 +static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
109168 +{
109169 + tree fntype;
109170 +
109171 + const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
109172 +
109173 + // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func)
109174 + fntype = build_function_type_list(void_type_node,
109175 + const_char_ptr_type_node,
109176 + unsigned_type_node,
109177 + const_char_ptr_type_node,
109178 + NULL_TREE);
109179 + report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
109180 +
109181 + TREE_PUBLIC(report_size_overflow_decl) = 1;
109182 + DECL_EXTERNAL(report_size_overflow_decl) = 1;
109183 + DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
109184 +}
109185 +
109186 +extern struct gimple_opt_pass pass_dce;
109187 +
109188 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
109189 +{
109190 + int i;
109191 + const char * const plugin_name = plugin_info->base_name;
109192 + const int argc = plugin_info->argc;
109193 + const struct plugin_argument * const argv = plugin_info->argv;
109194 + bool enable = true;
109195 +
109196 + struct register_pass_info size_overflow_pass_info = {
109197 + .pass = &size_overflow_pass.pass,
109198 + .reference_pass_name = "mudflap2",
109199 + .ref_pass_instance_number = 1,
109200 + .pos_op = PASS_POS_INSERT_BEFORE
109201 + };
109202 +
109203 + struct register_pass_info dce_pass_info = {
109204 + .pass = &pass_dce.pass,
109205 + .reference_pass_name = "mudflap2",
109206 + .ref_pass_instance_number = 1,
109207 + .pos_op = PASS_POS_INSERT_BEFORE
109208 + };
109209 +
109210 + if (!plugin_default_version_check(version, &gcc_version)) {
109211 + error(G_("incompatible gcc/plugin versions"));
109212 + return 1;
109213 + }
109214 +
109215 + for (i = 0; i < argc; ++i) {
109216 + if (!(strcmp(argv[i].key, "no-size_overflow"))) {
109217 + enable = false;
109218 + continue;
109219 + }
109220 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
109221 + }
109222 +
109223 + register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
109224 + if (enable) {
109225 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
109226 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
109227 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dce_pass_info);
109228 + }
109229 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
109230 +
109231 + return 0;
109232 +}
109233 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
109234 new file mode 100644
109235 index 0000000..b87ec9d
109236 --- /dev/null
109237 +++ b/tools/gcc/stackleak_plugin.c
109238 @@ -0,0 +1,313 @@
109239 +/*
109240 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
109241 + * Licensed under the GPL v2
109242 + *
109243 + * Note: the choice of the license means that the compilation process is
109244 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
109245 + * but for the kernel it doesn't matter since it doesn't link against
109246 + * any of the gcc libraries
109247 + *
109248 + * gcc plugin to help implement various PaX features
109249 + *
109250 + * - track lowest stack pointer
109251 + *
109252 + * TODO:
109253 + * - initialize all local variables
109254 + *
109255 + * BUGS:
109256 + * - none known
109257 + */
109258 +#include "gcc-plugin.h"
109259 +#include "config.h"
109260 +#include "system.h"
109261 +#include "coretypes.h"
109262 +#include "tree.h"
109263 +#include "tree-pass.h"
109264 +#include "flags.h"
109265 +#include "intl.h"
109266 +#include "toplev.h"
109267 +#include "plugin.h"
109268 +//#include "expr.h" where are you...
109269 +#include "diagnostic.h"
109270 +#include "plugin-version.h"
109271 +#include "tm.h"
109272 +#include "function.h"
109273 +#include "basic-block.h"
109274 +#include "gimple.h"
109275 +#include "rtl.h"
109276 +#include "emit-rtl.h"
109277 +
109278 +extern void print_gimple_stmt(FILE *, gimple, int, int);
109279 +
109280 +int plugin_is_GPL_compatible;
109281 +
109282 +static int track_frame_size = -1;
109283 +static const char track_function[] = "pax_track_stack";
109284 +static const char check_function[] = "pax_check_alloca";
109285 +static bool init_locals;
109286 +
109287 +static struct plugin_info stackleak_plugin_info = {
109288 + .version = "201203140940",
109289 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
109290 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
109291 +};
109292 +
109293 +static bool gate_stackleak_track_stack(void);
109294 +static unsigned int execute_stackleak_tree_instrument(void);
109295 +static unsigned int execute_stackleak_final(void);
109296 +
109297 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
109298 + .pass = {
109299 + .type = GIMPLE_PASS,
109300 + .name = "stackleak_tree_instrument",
109301 + .gate = gate_stackleak_track_stack,
109302 + .execute = execute_stackleak_tree_instrument,
109303 + .sub = NULL,
109304 + .next = NULL,
109305 + .static_pass_number = 0,
109306 + .tv_id = TV_NONE,
109307 + .properties_required = PROP_gimple_leh | PROP_cfg,
109308 + .properties_provided = 0,
109309 + .properties_destroyed = 0,
109310 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
109311 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
109312 + }
109313 +};
109314 +
109315 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
109316 + .pass = {
109317 + .type = RTL_PASS,
109318 + .name = "stackleak_final",
109319 + .gate = gate_stackleak_track_stack,
109320 + .execute = execute_stackleak_final,
109321 + .sub = NULL,
109322 + .next = NULL,
109323 + .static_pass_number = 0,
109324 + .tv_id = TV_NONE,
109325 + .properties_required = 0,
109326 + .properties_provided = 0,
109327 + .properties_destroyed = 0,
109328 + .todo_flags_start = 0,
109329 + .todo_flags_finish = TODO_dump_func
109330 + }
109331 +};
109332 +
109333 +static bool gate_stackleak_track_stack(void)
109334 +{
109335 + return track_frame_size >= 0;
109336 +}
109337 +
109338 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
109339 +{
109340 + gimple check_alloca;
109341 + tree fntype, fndecl, alloca_size;
109342 +
109343 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
109344 + fndecl = build_fn_decl(check_function, fntype);
109345 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
109346 +
109347 + // insert call to void pax_check_alloca(unsigned long size)
109348 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
109349 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
109350 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
109351 +}
109352 +
109353 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
109354 +{
109355 + gimple track_stack;
109356 + tree fntype, fndecl;
109357 +
109358 + fntype = build_function_type_list(void_type_node, NULL_TREE);
109359 + fndecl = build_fn_decl(track_function, fntype);
109360 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
109361 +
109362 + // insert call to void pax_track_stack(void)
109363 + track_stack = gimple_build_call(fndecl, 0);
109364 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
109365 +}
109366 +
109367 +#if BUILDING_GCC_VERSION == 4005
109368 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
109369 +{
109370 + tree fndecl;
109371 +
109372 + if (!is_gimple_call(stmt))
109373 + return false;
109374 + fndecl = gimple_call_fndecl(stmt);
109375 + if (!fndecl)
109376 + return false;
109377 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
109378 + return false;
109379 +// print_node(stderr, "pax", fndecl, 4);
109380 + return DECL_FUNCTION_CODE(fndecl) == code;
109381 +}
109382 +#endif
109383 +
109384 +static bool is_alloca(gimple stmt)
109385 +{
109386 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
109387 + return true;
109388 +
109389 +#if BUILDING_GCC_VERSION >= 4007
109390 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
109391 + return true;
109392 +#endif
109393 +
109394 + return false;
109395 +}
109396 +
109397 +static unsigned int execute_stackleak_tree_instrument(void)
109398 +{
109399 + basic_block bb, entry_bb;
109400 + bool prologue_instrumented = false, is_leaf = true;
109401 +
109402 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
109403 +
109404 + // 1. loop through BBs and GIMPLE statements
109405 + FOR_EACH_BB(bb) {
109406 + gimple_stmt_iterator gsi;
109407 +
109408 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
109409 + gimple stmt;
109410 +
109411 + stmt = gsi_stmt(gsi);
109412 +
109413 + if (is_gimple_call(stmt))
109414 + is_leaf = false;
109415 +
109416 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
109417 + if (!is_alloca(stmt))
109418 + continue;
109419 +
109420 + // 2. insert stack overflow check before each __builtin_alloca call
109421 + stackleak_check_alloca(&gsi);
109422 +
109423 + // 3. insert track call after each __builtin_alloca call
109424 + stackleak_add_instrumentation(&gsi);
109425 + if (bb == entry_bb)
109426 + prologue_instrumented = true;
109427 + }
109428 + }
109429 +
109430 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
109431 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
109432 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
109433 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
109434 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
109435 + return 0;
109436 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
109437 + return 0;
109438 +
109439 + // 4. insert track call at the beginning
109440 + if (!prologue_instrumented) {
109441 + gimple_stmt_iterator gsi;
109442 +
109443 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
109444 + if (dom_info_available_p(CDI_DOMINATORS))
109445 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
109446 + gsi = gsi_start_bb(bb);
109447 + stackleak_add_instrumentation(&gsi);
109448 + }
109449 +
109450 + return 0;
109451 +}
109452 +
109453 +static unsigned int execute_stackleak_final(void)
109454 +{
109455 + rtx insn;
109456 +
109457 + if (cfun->calls_alloca)
109458 + return 0;
109459 +
109460 + // keep calls only if function frame is big enough
109461 + if (get_frame_size() >= track_frame_size)
109462 + return 0;
109463 +
109464 + // 1. find pax_track_stack calls
109465 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
109466 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
109467 + rtx body;
109468 +
109469 + if (!CALL_P(insn))
109470 + continue;
109471 + body = PATTERN(insn);
109472 + if (GET_CODE(body) != CALL)
109473 + continue;
109474 + body = XEXP(body, 0);
109475 + if (GET_CODE(body) != MEM)
109476 + continue;
109477 + body = XEXP(body, 0);
109478 + if (GET_CODE(body) != SYMBOL_REF)
109479 + continue;
109480 + if (strcmp(XSTR(body, 0), track_function))
109481 + continue;
109482 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
109483 + // 2. delete call
109484 + insn = delete_insn_and_edges(insn);
109485 +#if BUILDING_GCC_VERSION >= 4007
109486 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
109487 + insn = delete_insn_and_edges(insn);
109488 +#endif
109489 + }
109490 +
109491 +// print_simple_rtl(stderr, get_insns());
109492 +// print_rtl(stderr, get_insns());
109493 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
109494 +
109495 + return 0;
109496 +}
109497 +
109498 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
109499 +{
109500 + const char * const plugin_name = plugin_info->base_name;
109501 + const int argc = plugin_info->argc;
109502 + const struct plugin_argument * const argv = plugin_info->argv;
109503 + int i;
109504 + struct register_pass_info stackleak_tree_instrument_pass_info = {
109505 + .pass = &stackleak_tree_instrument_pass.pass,
109506 +// .reference_pass_name = "tree_profile",
109507 + .reference_pass_name = "optimized",
109508 + .ref_pass_instance_number = 0,
109509 + .pos_op = PASS_POS_INSERT_BEFORE
109510 + };
109511 + struct register_pass_info stackleak_final_pass_info = {
109512 + .pass = &stackleak_final_rtl_opt_pass.pass,
109513 + .reference_pass_name = "final",
109514 + .ref_pass_instance_number = 0,
109515 + .pos_op = PASS_POS_INSERT_BEFORE
109516 + };
109517 +
109518 + if (!plugin_default_version_check(version, &gcc_version)) {
109519 + error(G_("incompatible gcc/plugin versions"));
109520 + return 1;
109521 + }
109522 +
109523 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
109524 +
109525 + for (i = 0; i < argc; ++i) {
109526 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
109527 + if (!argv[i].value) {
109528 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
109529 + continue;
109530 + }
109531 + track_frame_size = atoi(argv[i].value);
109532 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
109533 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
109534 + continue;
109535 + }
109536 + if (!strcmp(argv[i].key, "initialize-locals")) {
109537 + if (argv[i].value) {
109538 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
109539 + continue;
109540 + }
109541 + init_locals = true;
109542 + continue;
109543 + }
109544 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
109545 + }
109546 +
109547 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
109548 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
109549 +
109550 + return 0;
109551 +}
109552 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
109553 index 83b3dde..835bee7 100644
109554 --- a/usr/gen_init_cpio.c
109555 +++ b/usr/gen_init_cpio.c
109556 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name, const char *location,
109557 int retval;
109558 int rc = -1;
109559 int namesize;
109560 - int i;
109561 + unsigned int i;
109562
109563 mode |= S_IFREG;
109564
109565 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_location)
109566 *env_var = *expanded = '\0';
109567 strncat(env_var, start + 2, end - start - 2);
109568 strncat(expanded, new_location, start - new_location);
109569 - strncat(expanded, getenv(env_var), PATH_MAX);
109570 - strncat(expanded, end + 1, PATH_MAX);
109571 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
109572 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
109573 strncpy(new_location, expanded, PATH_MAX);
109574 + new_location[PATH_MAX] = 0;
109575 } else
109576 break;
109577 }
109578 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
109579 index 4f3434f..fc63040 100644
109580 --- a/virt/kvm/kvm_main.c
109581 +++ b/virt/kvm/kvm_main.c
109582 @@ -43,6 +43,8 @@
109583 #include <linux/swap.h>
109584 #include <linux/bitops.h>
109585 #include <linux/spinlock.h>
109586 +#include <linux/namei.h>
109587 +#include <linux/fs.h>
109588
109589 #include <asm/processor.h>
109590 #include <asm/io.h>
109591 @@ -575,12 +577,73 @@ out:
109592 return r;
109593 }
109594
109595 +/*
109596 + * We want to test whether the caller has been granted permissions to
109597 + * use this device. To be able to configure and control the device,
109598 + * the user needs access to PCI configuration space and BAR resources.
109599 + * These are accessed through PCI sysfs. PCI config space is often
109600 + * passed to the process calling this ioctl via file descriptor, so we
109601 + * can't rely on access to that file. We can check for permissions
109602 + * on each of the BAR resource files, which is a pretty clear
109603 + * indicator that the user has been granted access to the device.
109604 + */
109605 +static int probe_sysfs_permissions(struct pci_dev *dev)
109606 +{
109607 +#ifdef CONFIG_SYSFS
109608 + int i;
109609 + bool bar_found = false;
109610 +
109611 + for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
109612 + char *kpath, *syspath;
109613 + struct path path;
109614 + struct inode *inode;
109615 + int r;
109616 +
109617 + if (!pci_resource_len(dev, i))
109618 + continue;
109619 +
109620 + kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
109621 + if (!kpath)
109622 + return -ENOMEM;
109623 +
109624 + /* Per sysfs-rules, sysfs is always at /sys */
109625 + syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
109626 + kfree(kpath);
109627 + if (!syspath)
109628 + return -ENOMEM;
109629 +
109630 + r = kern_path(syspath, LOOKUP_FOLLOW, &path);
109631 + kfree(syspath);
109632 + if (r)
109633 + return r;
109634 +
109635 + inode = path.dentry->d_inode;
109636 +
109637 + r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
109638 + path_put(&path);
109639 + if (r)
109640 + return r;
109641 +
109642 + bar_found = true;
109643 + }
109644 +
109645 + /* If no resources, probably something special */
109646 + if (!bar_found)
109647 + return -EPERM;
109648 +
109649 + return 0;
109650 +#else
109651 + return -EINVAL; /* No way to control the device without sysfs */
109652 +#endif
109653 +}
109654 +
109655 static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
109656 struct kvm_assigned_pci_dev *assigned_dev)
109657 {
109658 int r = 0;
109659 struct kvm_assigned_dev_kernel *match;
109660 struct pci_dev *dev;
109661 + u8 header_type;
109662
109663 down_read(&kvm->slots_lock);
109664 mutex_lock(&kvm->lock);
109665 @@ -607,6 +670,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
109666 r = -EINVAL;
109667 goto out_free;
109668 }
109669 +
109670 + /* Don't allow bridges to be assigned */
109671 + pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
109672 + if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
109673 + r = -EPERM;
109674 + goto out_put;
109675 + }
109676 +
109677 + r = probe_sysfs_permissions(dev);
109678 + if (r)
109679 + goto out_put;
109680 +
109681 if (pci_enable_device(dev)) {
109682 printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
109683 r = -EBUSY;
109684 @@ -2494,7 +2569,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
109685 if (kvm_rebooting)
109686 /* spin while reset goes on */
109687 while (true)
109688 - ;
109689 + cpu_relax();
109690 /* Fault while not rebooting. We want the trace. */
109691 BUG();
109692 }
109693 @@ -2714,7 +2789,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
109694 kvm_arch_vcpu_put(vcpu);
109695 }
109696
109697 -int kvm_init(void *opaque, unsigned int vcpu_size,
109698 +int kvm_init(const void *opaque, unsigned int vcpu_size,
109699 struct module *module)
109700 {
109701 int r;
109702 @@ -2767,15 +2842,17 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
109703 /* A kmem cache lets us meet the alignment requirements of fx_save. */
109704 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
109705 __alignof__(struct kvm_vcpu),
109706 - 0, NULL);
109707 + SLAB_USERCOPY, NULL);
109708 if (!kvm_vcpu_cache) {
109709 r = -ENOMEM;
109710 goto out_free_5;
109711 }
109712
109713 - kvm_chardev_ops.owner = module;
109714 - kvm_vm_fops.owner = module;
109715 - kvm_vcpu_fops.owner = module;
109716 + pax_open_kernel();
109717 + *(void **)&kvm_chardev_ops.owner = module;
109718 + *(void **)&kvm_vm_fops.owner = module;
109719 + *(void **)&kvm_vcpu_fops.owner = module;
109720 + pax_close_kernel();
109721
109722 r = misc_register(&kvm_dev);
109723 if (r) {